From 99ff6397c3e130c3b2ba7ed689ef7590316e915a Mon Sep 17 00:00:00 2001
From: Jannis Pohlmann
Date: Fri, 29 Sep 2023 21:45:48 +0200
Subject: [PATCH 1/4] refactor: use DeploymentId type from toolshed crate

---
 Cargo.lock                                   |   1 +
 common/src/allocations/mod.rs                |  78 +++++-----
 common/src/allocations/monitor.rs            |   4 +-
 common/src/attestations/mod.rs               |  60 ++++----
 common/src/attestations/signer.rs            |  13 +-
 common/src/attestations/signers.rs           |   7 +-
 common/src/lib.rs                            |   2 -
 common/src/test_vectors.rs                   |  39 ++---
 common/src/types.rs                          | 143 -------------------
 service/Cargo.toml                           |   1 +
 service/src/common/indexer_management/mod.rs |  12 +-
 service/src/escrow_monitor.rs                |  14 +-
 service/src/graph_node.rs                    |  43 ++++--
 service/src/main.rs                          |   4 +-
 service/src/query_processor.rs               |  87 ++---------
 service/src/server/routes/subgraphs.rs       |   8 +-
 16 files changed, 159 insertions(+), 357 deletions(-)
 delete mode 100644 common/src/types.rs

diff --git a/Cargo.lock b/Cargo.lock
index 038b33d0..beada77c 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -4744,6 +4744,7 @@ dependencies = [
  "thiserror",
  "tokio",
  "toml 0.7.4",
+ "toolshed",
  "tower",
  "tower-http 0.4.0",
  "tracing",
diff --git a/common/src/allocations/mod.rs b/common/src/allocations/mod.rs
index 4e050f2d..f25e4c75 100644
--- a/common/src/allocations/mod.rs
+++ b/common/src/allocations/mod.rs
@@ -1,7 +1,7 @@
 // Copyright 2023-, GraphOps and Semiotic Labs.
 // SPDX-License-Identifier: Apache-2.0
 
-use alloy_primitives::Address;
+use alloy_primitives::{Address, B256};
 use anyhow::Result;
 use ethers::signers::coins_bip39::English;
 use ethers::signers::{MnemonicBuilder, Signer, Wallet};
@@ -9,8 +9,7 @@ use ethers_core::k256::ecdsa::SigningKey;
 use ethers_core::types::U256;
 use serde::Deserialize;
 use serde::Deserializer;
-
-use crate::types::SubgraphDeploymentID;
+use toolshed::thegraph::DeploymentId;
 
 pub mod monitor;
 
@@ -40,9 +39,22 @@ pub enum AllocationStatus {
     Claimed,
 }
 
+// Custom deserializer for `DeploymentId` that accepts a `0x...` string
+fn deserialize_deployment_id<'de, D>(deserializer: D) -> Result<DeploymentId, D::Error>
+where
+    D: Deserializer<'de>,
+{
+    let bytes = B256::deserialize(deserializer)?;
+    Ok(DeploymentId(bytes))
+}
+
 #[derive(Debug, Eq, PartialEq, Deserialize)]
 pub struct SubgraphDeployment {
-    pub id: SubgraphDeploymentID,
+    // This needs a custom deserialize function because it's returned from the
+    // network subgraph as a hex string, but `DeploymentId` assumes an IPFS hash
+    // in its `Deserialize` implementation
+    #[serde(deserialize_with = "deserialize_deployment_id")]
+    pub id: DeploymentId,
     #[serde(rename = "deniedAt")]
     pub denied_at: Option<u64>,
     #[serde(rename = "stakedTokens")]
@@ -98,13 +110,13 @@ impl<'d> Deserialize<'d> for Allocation {
 pub fn derive_key_pair(
     indexer_mnemonic: &str,
     epoch: u64,
-    deployment: &SubgraphDeploymentID,
+    deployment: &DeploymentId,
     index: u64,
 ) -> Result<Wallet<SigningKey>> {
     let mut derivation_path = format!("m/{}/", epoch);
     derivation_path.push_str(
         &deployment
-            .ipfs_hash()
+            .to_string()
             .as_bytes()
             .iter()
             .map(|char| char.to_string())
@@ -146,45 +158,37 @@ pub fn allocation_signer(indexer_mnemonic: &str, allocation: &Allocation) -> Res
 
 #[cfg(test)]
 mod test {
+    use lazy_static::lazy_static;
     use std::str::FromStr;
-
-    use crate::prelude::SubgraphDeploymentID;
+    use toolshed::thegraph::DeploymentId;
 
     use super::*;
 
     const INDEXER_OPERATOR_MNEMONIC: &str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about";
 
+    lazy_static! 
{
+        static ref DEPLOYMENT_ID: DeploymentId = DeploymentId(
+            "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a"
+                .parse()
+                .unwrap(),
+        );
+    }
+
     #[test]
     fn test_derive_key_pair() {
         assert_eq!(
-            derive_key_pair(
-                INDEXER_OPERATOR_MNEMONIC,
-                953,
-                &SubgraphDeploymentID::new(
-                    "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a"
-                )
-                .unwrap(),
-                0
-            )
-            .unwrap()
-            .address()
-            .as_fixed_bytes(),
+            derive_key_pair(INDEXER_OPERATOR_MNEMONIC, 953, &DEPLOYMENT_ID, 0)
+                .unwrap()
+                .address()
+                .as_fixed_bytes(),
             Address::from_str("0xfa44c72b753a66591f241c7dc04e8178c30e13af").unwrap()
         );
 
         assert_eq!(
-            derive_key_pair(
-                INDEXER_OPERATOR_MNEMONIC,
-                940,
-                &SubgraphDeploymentID::new(
-                    "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a"
-                )
-                .unwrap(),
-                2
-            )
-            .unwrap()
-            .address()
-            .as_fixed_bytes(),
+            derive_key_pair(INDEXER_OPERATOR_MNEMONIC, 940, &DEPLOYMENT_ID, 2)
+                .unwrap()
+                .address()
+                .as_fixed_bytes(),
             Address::from_str("0xa171cd12c3dde7eb8fe7717a0bcd06f3ffa65658").unwrap()
         );
     }
@@ -197,10 +201,7 @@ mod test {
             id: Address::from_str("0xa171cd12c3dde7eb8fe7717a0bcd06f3ffa65658").unwrap(),
             status: AllocationStatus::Null,
             subgraph_deployment: SubgraphDeployment {
-                id: SubgraphDeploymentID::new(
-                    "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a",
-                )
-                .unwrap(),
+                id: *DEPLOYMENT_ID,
                 denied_at: None,
                 staked_tokens: U256::zero(),
                 signalled_tokens: U256::zero(),
@@ -239,10 +240,7 @@ mod test {
             id: Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap(),
             status: AllocationStatus::Null,
             subgraph_deployment: SubgraphDeployment {
-                id: SubgraphDeploymentID::new(
-                    "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a",
-                )
-                .unwrap(),
+                id: *DEPLOYMENT_ID,
                 denied_at: None,
                 staked_tokens: U256::zero(),
                 signalled_tokens: U256::zero(),
diff --git a/common/src/allocations/monitor.rs b/common/src/allocations/monitor.rs
index 7ddea796..5c3426b9 100644
--- a/common/src/allocations/monitor.rs
+++ b/common/src/allocations/monitor.rs
@@ -246,9 +246,7 @@ impl AllocationMonitor {
                     .map(|e| {
                         format!(
                             "{{allocation: {:?}, deployment: {}, closedAtEpoch: {:?})}}",
-                            e.id,
-                            e.subgraph_deployment.id.ipfs_hash(),
-                            e.closed_at_epoch
+                            e.id, e.subgraph_deployment.id, e.closed_at_epoch
                         )
                     })
                     .collect::<Vec<String>>()
diff --git a/common/src/attestations/mod.rs b/common/src/attestations/mod.rs
index 21ee4aa5..0a5f0e74 100644
--- a/common/src/attestations/mod.rs
+++ b/common/src/attestations/mod.rs
@@ -7,8 +7,9 @@ use ethers::signers::MnemonicBuilder;
 use ethers::signers::Signer;
 use ethers::signers::Wallet;
 use ethers_core::k256::ecdsa::SigningKey;
+use toolshed::thegraph::DeploymentId;
 
-use crate::prelude::{Allocation, SubgraphDeploymentID};
+use crate::prelude::Allocation;
 
 pub mod signer;
 pub mod signers;
@@ -16,13 +17,13 @@ pub mod signers;
 pub fn derive_key_pair(
     indexer_mnemonic: &str,
     epoch: u64,
-    deployment: &SubgraphDeploymentID,
+    deployment: &DeploymentId,
     index: u64,
 ) -> Result<Wallet<SigningKey>> {
     let mut derivation_path = format!("m/{}/", epoch);
     derivation_path.push_str(
         &deployment
-            .ipfs_hash()
+            .to_string()
             .as_bytes()
             .iter()
             .map(|char| char.to_string())
@@ -69,46 +70,39 @@ pub fn attestation_signer_for_allocation(
 mod tests {
     use alloy_primitives::Address;
     use ethers_core::types::U256;
+    use lazy_static::lazy_static;
     use std::str::FromStr;
     use test_log::test;
 
-    use crate::prelude::{Allocation, AllocationStatus, SubgraphDeployment, SubgraphDeploymentID};
+    use crate::prelude::{Allocation, AllocationStatus, 
SubgraphDeployment}; use super::*; const INDEXER_OPERATOR_MNEMONIC: &str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"; + lazy_static! { + static ref DEPLOYMENT_ID: DeploymentId = DeploymentId( + "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a" + .parse() + .unwrap(), + ); + } + #[test] fn test_derive_key_pair() { assert_eq!( - derive_key_pair( - INDEXER_OPERATOR_MNEMONIC, - 953, - &SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a" - ) - .unwrap(), - 0 - ) - .unwrap() - .address() - .as_fixed_bytes(), + derive_key_pair(INDEXER_OPERATOR_MNEMONIC, 953, &DEPLOYMENT_ID, 0) + .unwrap() + .address() + .to_fixed_bytes(), Address::from_str("0xfa44c72b753a66591f241c7dc04e8178c30e13af").unwrap() ); assert_eq!( - derive_key_pair( - INDEXER_OPERATOR_MNEMONIC, - 940, - &SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a" - ) - .unwrap(), - 2 - ) - .unwrap() - .address() - .as_fixed_bytes(), + derive_key_pair(INDEXER_OPERATOR_MNEMONIC, 940, &DEPLOYMENT_ID, 2) + .unwrap() + .address() + .as_fixed_bytes(), Address::from_str("0xa171cd12c3dde7eb8fe7717a0bcd06f3ffa65658").unwrap() ); } @@ -121,10 +115,7 @@ mod tests { id: Address::from_str("0xa171cd12c3dde7eb8fe7717a0bcd06f3ffa65658").unwrap(), status: AllocationStatus::Null, subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a", - ) - .unwrap(), + id: *DEPLOYMENT_ID, denied_at: None, staked_tokens: U256::zero(), signalled_tokens: U256::zero(), @@ -163,10 +154,7 @@ mod tests { id: Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap(), status: AllocationStatus::Null, subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a", - ) - .unwrap(), + id: *DEPLOYMENT_ID, denied_at: None, staked_tokens: U256::zero(), signalled_tokens: U256::zero(), diff --git a/common/src/attestations/signer.rs b/common/src/attestations/signer.rs index a0119188..393da94e 100644 --- a/common/src/attestations/signer.rs +++ b/common/src/attestations/signer.rs @@ -11,11 +11,12 @@ use ethers_core::types::U256; use keccak_hash::keccak; use secp256k1::SecretKey; use std::convert::TryInto; +use toolshed::thegraph::DeploymentId; /// An attestation signer tied to a specific allocation via its signer key #[derive(Debug, Clone)] pub struct AttestationSigner { - subgraph_deployment_id: Bytes32, + subgraph_deployment_id: DeploymentId, domain_separator: DomainSeparator, signer: SecretKey, } @@ -25,7 +26,7 @@ impl AttestationSigner { chain_id: eip_712_derive::U256, dispute_manager: Address, signer: SecretKey, - subgraph_deployment_id: Bytes32, + deployment_id: DeploymentId, ) -> Self { let bytes = hex::decode("a070ffb1cd7409649bf77822cce74495468e06dbfaef09556838bf188679b9c2") .unwrap(); @@ -44,7 +45,7 @@ impl AttestationSigner { Self { domain_separator, signer, - subgraph_deployment_id, + subgraph_deployment_id: deployment_id, } } @@ -55,7 +56,7 @@ impl AttestationSigner { let receipt = Receipt { request_cid, response_cid, - subgraph_deployment_id: self.subgraph_deployment_id, + subgraph_deployment_id: *self.subgraph_deployment_id.0, }; // Unwrap: This can only fail if the SecretKey is invalid. 
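Aside: the `*self.subgraph_deployment_id.0` expressions above work because `DeploymentId` is a tuple struct wrapping alloy's `B256` (a `FixedBytes<32>` that dereferences to `[u8; 32]`, the `Bytes32` layout the EIP-712 `Receipt` expects). A minimal sketch of that conversion; the helper name is illustrative and not part of the patch:

    use toolshed::thegraph::DeploymentId;

    // Hypothetical helper: unwrap a DeploymentId into the raw 32-byte array.
    fn to_bytes32(deployment: &DeploymentId) -> [u8; 32] {
        // `deployment.0` is a `B256` (`FixedBytes<32>`); dereferencing it
        // yields the underlying `[u8; 32]`, which is `Copy`.
        *deployment.0
    }
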
@@ -69,7 +70,7 @@ impl AttestationSigner { v, r, s, - subgraph_deployment_id: self.subgraph_deployment_id, + subgraph_deployment_id: *self.subgraph_deployment_id.0, request_cid, response_cid, } @@ -106,7 +107,7 @@ pub fn create_attestation_signer( chain_id: U256, dispute_manager_address: Address, signer: SigningKey, - deployment_id: [u8; 32], + deployment_id: DeploymentId, ) -> anyhow::Result { // Tedious conversions to the "indexer_native" types let mut chain_id_bytes = [0u8; 32]; diff --git a/common/src/attestations/signers.rs b/common/src/attestations/signers.rs index d362826b..62c1a336 100644 --- a/common/src/attestations/signers.rs +++ b/common/src/attestations/signers.rs @@ -70,21 +70,20 @@ impl AttestationSigners { inner.chain_id, inner.dispute_manager, signer, - allocation.subgraph_deployment.id.bytes32(), + allocation.subgraph_deployment.id, ) }) { Ok(signer) => { e.insert(signer); info!( "Found attestation signer for {{allocation: {}, deployment: {}}}", - allocation.id, - allocation.subgraph_deployment.id.ipfs_hash() + allocation.id, allocation.subgraph_deployment.id, ); } Err(e) => { warn!( "Failed to find the attestation signer for {{allocation: {}, deployment: {}, createdAtEpoch: {}, err: {}}}", - allocation.id, allocation.subgraph_deployment.id.ipfs_hash(), allocation.created_at_epoch, e + allocation.id, allocation.subgraph_deployment.id, allocation.created_at_epoch, e ) } } diff --git a/common/src/lib.rs b/common/src/lib.rs index b8e06310..b20ddcaa 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -6,7 +6,6 @@ pub mod attestations; pub mod graphql; pub mod network_subgraph; pub mod signature_verification; -pub mod types; #[cfg(test)] mod test_vectors; @@ -20,5 +19,4 @@ pub mod prelude { signers::AttestationSigners, }; pub use super::network_subgraph::NetworkSubgraph; - pub use super::types::*; } diff --git a/common/src/test_vectors.rs b/common/src/test_vectors.rs index d4f3e2f0..16314c88 100644 --- a/common/src/test_vectors.rs +++ b/common/src/test_vectors.rs @@ -5,8 +5,9 @@ use std::{collections::HashMap, str::FromStr}; use alloy_primitives::Address; use ethers_core::types::U256; +use toolshed::thegraph::DeploymentId; -use crate::prelude::{Allocation, AllocationStatus, SubgraphDeployment, SubgraphDeploymentID}; +use crate::prelude::{Allocation, AllocationStatus, SubgraphDeployment}; pub const INDEXER_OPERATOR_MNEMONIC: &str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"; @@ -120,10 +121,11 @@ pub fn expected_eligible_allocations() -> HashMap { created_at_epoch: 953, closed_at_epoch: None, subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a", - ) - .unwrap(), + id: DeploymentId( + "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a" + .parse() + .unwrap(), + ), denied_at: Some(0), staked_tokens: U256::from_str("96183284152000000014901161").unwrap(), signalled_tokens: U256::from_str("182832939554154667498047").unwrap(), @@ -148,10 +150,11 @@ pub fn expected_eligible_allocations() -> HashMap { created_at_epoch: 953, closed_at_epoch: None, subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xcda7fa0405d6fd10721ed13d18823d24b535060d8ff661f862b26c23334f13bf", - ) - .unwrap(), + id: DeploymentId( + "0xcda7fa0405d6fd10721ed13d18823d24b535060d8ff661f862b26c23334f13bf" + .parse() + .unwrap(), + ), denied_at: Some(0), staked_tokens: U256::from_str("53885041676589999979510903").unwrap(), 
signalled_tokens: U256::from_str("104257136417832003117925").unwrap(), @@ -176,10 +179,11 @@ pub fn expected_eligible_allocations() -> HashMap { created_at_epoch: 940, closed_at_epoch: Some(953), subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a", - ) - .unwrap(), + id: DeploymentId( + "0xbbde25a2c85f55b53b7698b9476610c3d1202d88870e66502ab0076b7218f98a" + .parse() + .unwrap(), + ), denied_at: Some(0), staked_tokens: U256::from_str("96183284152000000014901161").unwrap(), signalled_tokens: U256::from_str("182832939554154667498047").unwrap(), @@ -204,10 +208,11 @@ pub fn expected_eligible_allocations() -> HashMap { created_at_epoch: 940, closed_at_epoch: Some(953), subgraph_deployment: SubgraphDeployment { - id: SubgraphDeploymentID::new( - "0xc064c354bc21dd958b1d41b67b8ef161b75d2246b425f68ed4c74964ae705cbd", - ) - .unwrap(), + id: DeploymentId( + "0xc064c354bc21dd958b1d41b67b8ef161b75d2246b425f68ed4c74964ae705cbd" + .parse() + .unwrap(), + ), denied_at: Some(0), staked_tokens: U256::from_str("85450761241000000055879354").unwrap(), signalled_tokens: U256::from_str("154944508746646550301048").unwrap(), diff --git a/common/src/types.rs b/common/src/types.rs deleted file mode 100644 index cf30b419..00000000 --- a/common/src/types.rs +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2023-, GraphOps and Semiotic Labs. -// SPDX-License-Identifier: Apache-2.0 - -use ethers::utils::hex; -use serde::Deserialize; - -/// Subgraph identifier type: SubgraphDeploymentID with field 'value' -#[derive(Debug, Clone, PartialEq, Eq, Hash)] -pub struct SubgraphDeploymentID { - // bytes32 subgraph deployment Id - value: [u8; 32], -} - -/// Implement deserialization for SubgraphDeploymentID -/// Deserialize from hex string or IPFS multihash string -impl<'de> Deserialize<'de> for SubgraphDeploymentID { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - SubgraphDeploymentID::new(&s).map_err(serde::de::Error::custom) - } -} - -/// Implement SubgraphDeploymentID functions -impl SubgraphDeploymentID { - /// Construct SubgraphDeploymentID from a string - /// Validate IPFS hash or hex format before decoding - pub fn new(id: &str) -> anyhow::Result { - SubgraphDeploymentID::from_hex(id) - .or_else(|_| SubgraphDeploymentID::from_ipfs_hash(id)) - .map_err(|_| anyhow::anyhow!("Invalid subgraph deployment ID: {}", id)) - } - - /// Construct SubgraphDeploymentID from a 32 bytes hex string. - /// The '0x' prefix is optional. - /// - /// Returns an error if the input is not a valid hex string or if the input is not 32 bytes long. - pub fn from_hex(id: &str) -> anyhow::Result { - let mut buf = [0u8; 32]; - hex::decode_to_slice(id.trim_start_matches("0x"), &mut buf)?; - Ok(SubgraphDeploymentID { value: buf }) - } - - /// Construct SubgraphDeploymentID from a 34 bytes IPFS multihash string. - /// The 'Qm' prefix is mandatory. - /// - /// Returns an error if the input is not a valid IPFS multihash string or if the input is not 34 bytes long. - pub fn from_ipfs_hash(hash: &str) -> anyhow::Result { - let bytes = bs58::decode(hash).into_vec()?; - let value = bytes[2..].try_into()?; - Ok(SubgraphDeploymentID { value }) - } - - /// Returns the subgraph deployment ID as a 32 bytes array. - pub fn bytes32(&self) -> [u8; 32] { - self.value - } - - /// Returns the subgraph deployment ID as a 34 bytes IPFS multihash string. 
- pub fn ipfs_hash(&self) -> String { - let value = self.value; - let mut bytes: Vec = vec![0x12, 0x20]; - bytes.extend(value.to_vec()); - bs58::encode(bytes).into_string() - } - - /// Returns the subgraph deployment ID as a 32 bytes hex string. - /// The '0x' prefix is included. - pub fn hex(&self) -> String { - format!("0x{}", hex::encode(self.value)) - } -} - -impl ToString for SubgraphDeploymentID { - fn to_string(&self) -> String { - self.hex() - } -} - -#[cfg(test)] -mod tests { - - use super::*; - - #[test] - fn hex_to_ipfs_multihash() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let expected_ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::from_hex(deployment_id) - .unwrap() - .ipfs_hash(), - expected_ipfs_hash - ); - } - - #[test] - fn ipfs_multihash_to_hex() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::from_ipfs_hash(ipfs_hash) - .unwrap() - .to_string(), - deployment_id - ); - } - - #[test] - fn subgraph_deployment_id_input_validation_success() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::new(ipfs_hash).unwrap().to_string(), - deployment_id - ); - - assert_eq!( - SubgraphDeploymentID::new(deployment_id) - .unwrap() - .ipfs_hash(), - ipfs_hash - ); - } - - #[test] - fn subgraph_deployment_id_input_validation_fail() { - let invalid_deployment_id = - "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884a"; - let invalid_ipfs_hash = "Qm1234"; - - let res = SubgraphDeploymentID::new(invalid_deployment_id); - assert!(res.is_err()); - - let res = SubgraphDeploymentID::new(invalid_ipfs_hash); - assert!(res.is_err()); - } -} diff --git a/service/Cargo.toml b/service/Cargo.toml index 8b9e2e17..c0bb3a94 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -47,6 +47,7 @@ sqlx = { version = "0.7.1", features = ["postgres", "runtime-tokio", "bigdecimal alloy-primitives = { version = "0.3.3", features = ["serde"] } alloy-sol-types = "0.3.2" lazy_static = "1.4.0" +toolshed = { git = "https://github.com/edgeandnode/toolshed", tag = "v0.2.2", features = ["graphql"] } [dev-dependencies] faux = "0.1.10" diff --git a/service/src/common/indexer_management/mod.rs b/service/src/common/indexer_management/mod.rs index 62c86036..00a4334a 100644 --- a/service/src/common/indexer_management/mod.rs +++ b/service/src/common/indexer_management/mod.rs @@ -6,8 +6,6 @@ use serde::{Deserialize, Serialize}; use serde_json::Value; use sqlx::{FromRow, PgPool}; -use indexer_common::prelude::SubgraphDeploymentID; - use super::indexer_error::{IndexerError, IndexerErrorCause, IndexerErrorCode}; #[derive(Debug, FromRow, Clone, Serialize, Deserialize, SimpleObject)] @@ -25,10 +23,7 @@ pub async fn cost_models( pool: &PgPool, deployments: &[String], ) -> Result, IndexerError> { - let deployment_ids = deployments - .iter() - .map(|d| SubgraphDeploymentID::new(d).unwrap().to_string()) - .collect::>(); + let deployment_ids = deployments.to_vec(); let models = if deployment_ids.is_empty() { sqlx::query_as!( CostModel, @@ -93,7 +88,6 @@ pub async fn cost_model( pool: &PgPool, deployment: &str, ) -> Result, IndexerError> { - let deployment_id = SubgraphDeploymentID::new(deployment).unwrap().to_string(); 
     let model = sqlx::query_as!(
         CostModel,
         r#"
@@ -101,7 +95,7 @@ pub async fn cost_model(
         FROM "CostModels"
         WHERE deployment = $1
         "#,
-        deployment_id
+        deployment
     )
     .fetch_optional(pool)
     .await
@@ -113,7 +107,7 @@ pub async fn cost_model(
             || {
                 Some(merge_global(
                     CostModel {
-                        deployment: deployment.to_string(),
+                        deployment: deployment.into(),
                         model: None,
                         variables: None,
                     },
diff --git a/service/src/escrow_monitor.rs b/service/src/escrow_monitor.rs
index 44f03274..7ea98288 100644
--- a/service/src/escrow_monitor.rs
+++ b/service/src/escrow_monitor.rs
@@ -10,13 +10,14 @@ use serde_json::json;
 use std::collections::HashMap;
 use std::sync::Arc;
 use tokio::sync::RwLock;
+use toolshed::thegraph::DeploymentId;
 
 use crate::graph_node::GraphNodeInstance;
 
 #[derive(Debug)]
 struct EscrowMonitorInner {
     graph_node: GraphNodeInstance,
-    escrow_subgraph_deployment: String,
+    escrow_subgraph_deployment: DeploymentId,
     indexer_address: Address,
     interval_ms: u64,
     sender_accounts: Arc<RwLock<HashMap<Address, U256>>>,
@@ -33,7 +34,7 @@ pub struct EscrowMonitor {
 impl EscrowMonitor {
     pub async fn new(
         graph_node: GraphNodeInstance,
-        escrow_subgraph_deployment: String,
+        escrow_subgraph_deployment: DeploymentId,
         indexer_address: Address,
         interval_ms: u64,
     ) -> Result<Self> {
@@ -61,7 +62,7 @@ impl EscrowMonitor {
 
     async fn current_accounts(
         graph_node: &GraphNodeInstance,
-        escrow_subgraph_deployment: &str,
+        escrow_subgraph_deployment: &DeploymentId,
         indexer_address: &Address,
     ) -> Result<HashMap<Address, U256>> {
         // These 2 structs are used to deserialize the response from the escrow subgraph.
@@ -192,14 +193,15 @@ mod tests {
     #[tokio::test]
     async fn test_current_accounts() {
         let indexer_address = Address::from_str(test_vectors::INDEXER_ADDRESS).unwrap();
-        let escrow_subgraph_deployment = "Qmabcdefghijklmnopqrstuvwxyz1234567890ABCDEFGH";
+        let escrow_subgraph_deployment =
+            DeploymentId::from_str("Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss").unwrap();
 
         let mock_server = MockServer::start().await;
         let graph_node = graph_node::GraphNodeInstance::new(&mock_server.uri());
         let mock = Mock::given(method("POST"))
             .and(path(
-                "/subgraphs/id/".to_string() + escrow_subgraph_deployment,
+                "/subgraphs/id/".to_string() + &escrow_subgraph_deployment.to_string(),
             ))
             .respond_with(
                 ResponseTemplate::new(200)
@@ -209,7 +211,7 @@ mod tests {
 
         let inner = EscrowMonitorInner {
             graph_node,
-            escrow_subgraph_deployment: escrow_subgraph_deployment.to_string(),
+            escrow_subgraph_deployment,
             indexer_address,
             interval_ms: 1000,
             sender_accounts: Arc::new(RwLock::new(HashMap::new())),
diff --git a/service/src/graph_node.rs b/service/src/graph_node.rs
index fe2b3de4..7c875764 100644
--- a/service/src/graph_node.rs
+++ b/service/src/graph_node.rs
@@ -1,10 +1,10 @@
 // Copyright 2023-, GraphOps and Semiotic Labs.
 // SPDX-License-Identifier: Apache-2.0
 
-use std::sync::Arc;
-
 use anyhow::anyhow;
 use reqwest::{header, Client, Url};
+use std::sync::Arc;
+use toolshed::thegraph::DeploymentId;
 
 use crate::query_processor::{QueryError, UnattestedQueryResult};
 
@@ -34,17 +34,21 @@ impl GraphNodeInstance {
 
     pub async fn subgraph_query_raw(
         &self,
-        subgraph_id: &str,
+        subgraph_id: &DeploymentId,
         data: String,
     ) -> Result<UnattestedQueryResult, QueryError> {
         let request = self
             .client
-            .post(self.subgraphs_base_url.join(subgraph_id).map_err(|e| {
-                QueryError::Other(anyhow!(
-                    "Could not build subgraph query URL: {}",
-                    e.to_string()
-                ))
-            })?) 
+ .post( + self.subgraphs_base_url + .join(&subgraph_id.to_string()) + .map_err(|e| { + QueryError::Other(anyhow!( + "Could not build subgraph query URL: {}", + e.to_string() + )) + })?, + ) .body(data) .header(header::CONTENT_TYPE, "application/json"); @@ -63,18 +67,26 @@ impl GraphNodeInstance { #[cfg(test)] mod test { + use std::str::FromStr; + + use lazy_static::lazy_static; use serde_json::json; use wiremock::matchers::{method, path}; use wiremock::{Mock, MockServer, ResponseTemplate}; use super::*; - const NETWORK_SUBGRAPH_ID: &str = "QmV614UpBCpuusv5MsismmPYu4KqLtdeNMKpiNrX56kw6u"; + lazy_static! { + static ref NETWORK_SUBGRAPH_ID: DeploymentId = + DeploymentId::from_str("QmV614UpBCpuusv5MsismmPYu4KqLtdeNMKpiNrX56kw6u").unwrap(); + } async fn mock_graph_node_server() -> MockServer { let mock_server = MockServer::start().await; let mock = Mock::given(method("POST")) - .and(path("/subgraphs/id/".to_string() + NETWORK_SUBGRAPH_ID)) + .and(path( + "/subgraphs/id/".to_string() + &NETWORK_SUBGRAPH_ID.to_string(), + )) .respond_with(ResponseTemplate::new(200).set_body_raw( r#" { @@ -103,8 +115,11 @@ mod test { #[tokio::test] #[ignore] // Run only if explicitly specified async fn test_subgraph_query_local() { - let network_subgraph_id = std::env::var("NETWORK_SUBGRAPH_ID") - .expect("NETWORK_SUBGRAPH_ID env variable is not set"); + let network_subgraph_id = DeploymentId::from_str( + &std::env::var("NETWORK_SUBGRAPH_ID") + .expect("NETWORK_SUBGRAPH_ID env variable is not set"), + ) + .unwrap(); let graph_node = local_graph_node().await; @@ -151,7 +166,7 @@ mod test { }); let response = graph_node - .subgraph_query_raw(NETWORK_SUBGRAPH_ID, query_json.to_string()) + .subgraph_query_raw(&NETWORK_SUBGRAPH_ID, query_json.to_string()) .await .unwrap(); diff --git a/service/src/main.rs b/service/src/main.rs index 5763a35e..cdbcb980 100644 --- a/service/src/main.rs +++ b/service/src/main.rs @@ -7,6 +7,7 @@ use axum::Server; use dotenvy::dotenv; use ethereum_types::U256; use std::{net::SocketAddr, str::FromStr}; +use toolshed::thegraph::DeploymentId; use tracing::info; use indexer_common::prelude::{AllocationMonitor, AttestationSigners, NetworkSubgraph}; @@ -99,7 +100,8 @@ async fn main() -> Result<(), std::io::Error> { let escrow_monitor = escrow_monitor::EscrowMonitor::new( graph_node.clone(), - config.escrow_subgraph.escrow_subgraph_deployment, + DeploymentId::from_str(&config.escrow_subgraph.escrow_subgraph_deployment) + .expect("escrow deployment ID is invalid"), config.ethereum.indexer_address, config.escrow_subgraph.escrow_syncing_interval, ) diff --git a/service/src/query_processor.rs b/service/src/query_processor.rs index ae0b683c..17a1120f 100644 --- a/service/src/query_processor.rs +++ b/service/src/query_processor.rs @@ -5,8 +5,9 @@ use ethers_core::types::{Signature, U256}; use log::error; use serde::{Deserialize, Serialize}; use tap_core::tap_manager::SignedReceipt; +use toolshed::thegraph::DeploymentId; -use indexer_common::prelude::{AttestationSigner, AttestationSigners, SubgraphDeploymentID}; +use indexer_common::prelude::{AttestationSigner, AttestationSigners}; use crate::graph_node::GraphNodeInstance; use crate::tap_manager::TapManager; @@ -36,13 +37,13 @@ pub struct Response { /// Later add along with PaidQuery #[derive(Debug)] pub struct FreeQuery { - pub subgraph_deployment_id: SubgraphDeploymentID, + pub subgraph_deployment_id: DeploymentId, pub query: String, } /// Paid query needs subgraph_deployment_id, query, receipt pub struct PaidQuery { - pub subgraph_deployment_id: 
SubgraphDeploymentID, + pub subgraph_deployment_id: DeploymentId, pub query: String, pub receipt: String, } @@ -55,15 +56,11 @@ pub enum QueryError { IndexingError, #[error("Bad or invalid entity data found in the subgraph: {}", .0.to_string())] BadData(anyhow::Error), - #[error("Invalid GraphQL query string: {0}")] - InvalidFormat(String), - #[error("Cannot query field: {:#?}", .0)] - UnsupportedFields(Vec), #[error("Unknown error: {0}")] Other(anyhow::Error), } -#[derive(Debug, Clone)] +#[derive(Clone, Debug)] pub struct QueryProcessor { graph_node: GraphNodeInstance, attestation_signers: AttestationSigners, @@ -89,7 +86,7 @@ impl QueryProcessor { ) -> Result, QueryError> { let response = self .graph_node - .subgraph_query_raw(&query.subgraph_deployment_id.ipfs_hash(), query.query) + .subgraph_query_raw(&query.subgraph_deployment_id, query.query) .await?; Ok(Response { @@ -128,7 +125,7 @@ impl QueryProcessor { let response = self .graph_node - .subgraph_query_raw(&subgraph_deployment_id.ipfs_hash(), query.clone()) + .subgraph_query_raw(&subgraph_deployment_id, query.clone()) .await?; let attestation_signature = response @@ -169,78 +166,25 @@ mod tests { attestation_signer_for_allocation, create_attestation_signer, Allocation, AllocationStatus, SubgraphDeployment, }; + use lazy_static::lazy_static; use super::*; const INDEXER_OPERATOR_MNEMONIC: &str = "abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon abandon about"; const INDEXER_ADDRESS: &str = "0x1234567890123456789012345678901234567890"; - #[test] - fn hex_to_ipfs_multihash() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let expected_ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::from_hex(deployment_id) - .unwrap() - .ipfs_hash(), - expected_ipfs_hash - ); - } - - #[test] - fn ipfs_multihash_to_hex() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::from_ipfs_hash(ipfs_hash) - .unwrap() - .to_string(), - deployment_id + lazy_static! 
{ + static ref DEPLOYMENT_ID: DeploymentId = DeploymentId( + "0xc064c354bc21dd958b1d41b67b8ef161b75d2246b425f68ed4c74964ae705cbd" + .parse() + .unwrap(), ); } - #[test] - fn subgraph_deployment_id_input_validation_success() { - let deployment_id = "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884abf"; - let ipfs_hash = "QmcPHxcC2ZN7m79XfYZ77YmF4t9UCErv87a9NFKrSLWKtJ"; - - assert_eq!( - SubgraphDeploymentID::new(ipfs_hash).unwrap().to_string(), - deployment_id - ); - - assert_eq!( - SubgraphDeploymentID::new(deployment_id) - .unwrap() - .ipfs_hash(), - ipfs_hash - ); - } - - #[test] - fn subgraph_deployment_id_input_validation_fail() { - let invalid_deployment_id = - "0xd0b0e5b65df45a3fff1a653b4188881318e8459d3338f936aab16c4003884a"; - let invalid_ipfs_hash = "Qm1234"; - - let res = SubgraphDeploymentID::new(invalid_deployment_id); - assert!(res.is_err()); - - let res = SubgraphDeploymentID::new(invalid_ipfs_hash); - assert!(res.is_err()); - } - #[test] fn paid_query_attestation() { - let subgraph_deployment_id = SubgraphDeploymentID::new( - "0xc064c354bc21dd958b1d41b67b8ef161b75d2246b425f68ed4c74964ae705cbd", - ) - .unwrap(); - let subgraph_deployment = SubgraphDeployment { - id: subgraph_deployment_id.clone(), + id: *DEPLOYMENT_ID, denied_at: None, staked_tokens: U256::from(0), signalled_tokens: U256::from(0), @@ -265,12 +209,11 @@ mod tests { let allocation_key = attestation_signer_for_allocation(INDEXER_OPERATOR_MNEMONIC, allocation).unwrap(); - let attestation_signer = create_attestation_signer( U256::from(1), Address::from_str("0xdeadbeefcafebabedeadbeefcafebabedeadbeef").unwrap(), allocation_key, - subgraph_deployment_id.bytes32(), + *DEPLOYMENT_ID, ) .unwrap(); diff --git a/service/src/server/routes/subgraphs.rs b/service/src/server/routes/subgraphs.rs index b21eeac9..128ff9f9 100644 --- a/service/src/server/routes/subgraphs.rs +++ b/service/src/server/routes/subgraphs.rs @@ -7,10 +7,10 @@ use axum::{ response::IntoResponse, Json, }; +use std::str::FromStr; +use toolshed::thegraph::DeploymentId; use tracing::trace; -use indexer_common::prelude::SubgraphDeploymentID; - use crate::{ metrics, query_processor::FreeQuery, @@ -31,11 +31,11 @@ pub async fn subgraph_queries( let (parts, body) = req.into_parts(); // Initialize id into a subgraph deployment ID - let subgraph_deployment_id = match SubgraphDeploymentID::new(id.as_str()) { + let subgraph_deployment_id = match DeploymentId::from_str(id.as_str()) { Ok(id) => id, Err(e) => return bad_request_response(&e.to_string()), }; - let deployment_label = subgraph_deployment_id.ipfs_hash(); + let deployment_label = subgraph_deployment_id.to_string(); let query_duration_timer = metrics::QUERY_DURATION .with_label_values(&[&deployment_label]) From 959afd682560008e781421f97a47b13182aaf39f Mon Sep 17 00:00:00 2001 From: Jannis Pohlmann Date: Tue, 10 Oct 2023 14:20:25 +0200 Subject: [PATCH 2/4] refactor: update a few dependencies --- Cargo.lock | 194 ++++++++++++++++++++++++----- common/Cargo.toml | 3 +- common/src/network_subgraph/mod.rs | 2 +- service/Cargo.toml | 8 +- 4 files changed, 172 insertions(+), 35 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index beada77c..831c4509 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -278,9 +278,9 @@ version = "4.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9ed522678d412d77effe47b3c82314ac36952a35e6e852093dd48287c421f80" dependencies = [ - "async-graphql-derive", - "async-graphql-parser", - "async-graphql-value", + "async-graphql-derive 4.0.16", + 
"async-graphql-parser 4.0.16", + "async-graphql-value 4.0.16", "async-stream", "async-trait", "base64 0.13.1", @@ -304,13 +304,46 @@ dependencies = [ "thiserror", ] +[[package]] +name = "async-graphql" +version = "6.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1addb0b551c59640e15de99e7566a4e3a1186cf42269e160c485ba6d8b43fe30" +dependencies = [ + "async-graphql-derive 6.0.7", + "async-graphql-parser 6.0.7", + "async-graphql-value 6.0.7", + "async-stream", + "async-trait", + "base64 0.13.1", + "bytes", + "fast_chemail", + "fnv", + "futures-util", + "handlebars", + "http", + "indexmap 2.0.0", + "mime", + "multer", + "num-traits", + "once_cell", + "pin-project-lite", + "regex", + "serde", + "serde_json", + "serde_urlencoded", + "static_assertions", + "tempfile", + "thiserror", +] + [[package]] name = "async-graphql-axum" version = "4.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c91ac174c05670edffb720bc376b9d4c274c3d127ac08ed3d38144c9415502cd" dependencies = [ - "async-graphql", + "async-graphql 4.0.16", "async-trait", "axum", "bytes", @@ -328,7 +361,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c121a894495d7d3fc3d4e15e0a9843e422e4d1d9e3c514d8062a1c94b35b005d" dependencies = [ "Inflector", - "async-graphql-parser", + "async-graphql-parser 4.0.16", "darling 0.14.4", "proc-macro-crate 1.3.1", "proc-macro2", @@ -337,13 +370,42 @@ dependencies = [ "thiserror", ] +[[package]] +name = "async-graphql-derive" +version = "6.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e1121ff0be2feea705c24f6940162c4f14a077e50a217b16e091e6534a8c08a" +dependencies = [ + "Inflector", + "async-graphql-parser 6.0.7", + "darling 0.20.3", + "proc-macro-crate 1.3.1", + "proc-macro2", + "quote", + "strum 0.25.0", + "syn 2.0.28", + "thiserror", +] + [[package]] name = "async-graphql-parser" version = "4.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b6c386f398145c6180206c1869c2279f5a3d45db5be4e0266148c6ac5c6ad68" dependencies = [ - "async-graphql-value", + "async-graphql-value 4.0.16", + "pest", + "serde", + "serde_json", +] + +[[package]] +name = "async-graphql-parser" +version = "6.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0b6713fd4ffd610b8b6f6e911bf31277cbb84b7c2a9cdeeb39d1b3eed3b88e4" +dependencies = [ + "async-graphql-value 6.0.7", "pest", "serde", "serde_json", @@ -361,6 +423,18 @@ dependencies = [ "serde_json", ] +[[package]] +name = "async-graphql-value" +version = "6.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d74240f9daa8c1e8f73e9cfcc338d20a88d00bbeb83ded49ce8e5b4dcec0f5" +dependencies = [ + "bytes", + "indexmap 2.0.0", + "serde", + "serde_json", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -2018,13 +2092,12 @@ dependencies = [ [[package]] name = "flume" -version = "0.10.14" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1657b4441c3403d9f7b3409e47575237dac27b1b5726df654a6ecbf92f0f7577" +checksum = "55ac459de2512911e4b674ce33cf20befaba382d05b62b008afc1c8b57cbf181" dependencies = [ "futures-core", "futures-sink", - "pin-project", "spin 0.9.8", ] @@ -2277,6 +2350,16 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "graphql" +version = "0.1.0" +source = "git+https://github.com/edgeandnode/toolshed?branch=main#714393efb0c02e27f406be1cce826f4961e979ae" +dependencies = [ + 
"firestorm", + "graphql-parser", + "serde", +] + [[package]] name = "graphql-parser" version = "0.4.0" @@ -2317,6 +2400,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "4.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39b3bc2a8f715298032cf5087e58573809374b08160aa7d750582bdb82d2683" +dependencies = [ + "log", + "pest", + "pest_derive", + "serde", + "serde_json", + "thiserror", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2679,6 +2776,7 @@ dependencies = [ "ethers", "ethers-core", "faux", + "graphql", "keccak-hash", "lazy_static", "log", @@ -3584,6 +3682,40 @@ dependencies = [ "ucd-trie", ] +[[package]] +name = "pest_derive" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "be99c4c1d2fc2769b1d00239431d711d08f6efedcecb8b6e30707160aee99c15" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e56094789873daa36164de2e822b3888c6ae4b4f9da555a1103587658c805b1e" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2", + "quote", + "syn 2.0.28", +] + +[[package]] +name = "pest_meta" +version = "2.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6733073c7cff3d8459fda0e42f13a047870242aed8b509fe98000928975f359e" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.7", +] + [[package]] name = "petgraph" version = "0.6.3" @@ -4714,7 +4846,7 @@ dependencies = [ "alloy-primitives", "alloy-sol-types", "anyhow", - "async-graphql", + "async-graphql 4.0.16", "async-graphql-axum", "autometrics", "axum", @@ -4971,9 +5103,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e58421b6bc416714d5115a2ca953718f6c621a51b68e4f4922aea5a4391a721" +checksum = "0e50c216e3624ec8e7ecd14c6a6a6370aad6ee5d8cfc3ab30b5162eeeef2ed33" dependencies = [ "sqlx-core", "sqlx-macros", @@ -4984,9 +5116,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd4cef4251aabbae751a3710927945901ee1d97ee96d757f6880ebb9a79bfd53" +checksum = "8d6753e460c998bbd4cd8c6f0ed9a64346fcca0723d6e75e52fdc351c5d2169d" dependencies = [ "ahash 0.8.3", "atoi", @@ -5027,9 +5159,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "208e3165167afd7f3881b16c1ef3f2af69fa75980897aac8874a0696516d12c2" +checksum = "9a793bb3ba331ec8359c1853bd39eed32cdd7baaf22c35ccf5c92a7e8d1189ec" dependencies = [ "proc-macro2", "quote", @@ -5040,9 +5172,9 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a4a8336d278c62231d87f24e8a7a74898156e34c1c18942857be2acb29c7dfc" +checksum = "0a4ee1e104e00dedb6aa5ffdd1343107b0a4702e862a84320ee7cc74782d96fc" dependencies = [ "dotenvy", "either", @@ -5066,9 +5198,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ca69bf415b93b60b80dc8fda3cb4ef52b2336614d8da2de5456cc942a110482" +checksum = "864b869fdf56263f4c95c45483191ea0af340f9f3e3e7b4d57a61c7c87a970db" dependencies = 
[ "atoi", "base64 0.21.2", @@ -5111,9 +5243,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0db2df1b8731c3651e204629dd55e52adbae0462fa1bdcbed56a2302c18181e" +checksum = "eb7ae0e6a97fb3ba33b23ac2671a5ce6e3cabe003f451abd5a56e7951d975624" dependencies = [ "atoi", "base64 0.21.2", @@ -5154,9 +5286,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4c21bf34c7cae5b283efb3ac1bcc7670df7561124dc2f8bdc0b59be40f79a2" +checksum = "d59dc83cf45d89c555a577694534fcd1b55c545a816c816ce51f20bbe56a4f3f" dependencies = [ "atoi", "flume", @@ -5395,18 +5527,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.40" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "978c9a314bd8dc99be594bc3c175faaa9794be04a5a5e153caba6915336cebac" +checksum = "1177e8c6d7ede7afde3585fd2513e611227efd6481bd78d2e82ba1ce16557ed4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.40" +version = "1.0.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9456a42c5b0d803c8cd86e73dd7cc9edd429499f37a3550d286d5e86720569f" +checksum = "10712f02019e9288794769fba95cd6847df9874d49d871d062172f9dd41bc4cc" dependencies = [ "proc-macro2", "quote", @@ -5623,15 +5755,19 @@ dependencies = [ [[package]] name = "toolshed" version = "0.2.2" -source = "git+https://github.com/edgeandnode/toolshed?tag=v0.2.2#5daca78935d9a9fc34216946d3f22c614d9dbeee" +source = "git+https://github.com/edgeandnode/toolshed?branch=main#714393efb0c02e27f406be1cce826f4961e979ae" dependencies = [ "alloy-primitives", + "alloy-sol-types", + "async-graphql 6.0.7", "bs58 0.5.0", + "ethers-core", "firestorm", "graphql-parser", "serde", "serde_with", "sha3", + "thiserror", "url", ] diff --git a/common/Cargo.toml b/common/Cargo.toml index d38613ff..aa045970 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -21,7 +21,8 @@ secp256k1 = { version = "0.27.0", features = ["recovery"] } serde = { version = "1.0.188", features = ["derive"] } serde_json = "1.0.107" tokio = { version = "1.32.0", features = ["full", "macros", "rt"] } -toolshed = { git = "https://github.com/edgeandnode/toolshed", tag = "v0.2.2", features = ["graphql"] } +toolshed = { git = "https://github.com/edgeandnode/toolshed", branch = "main", features = ["graphql"] } +graphql = { git = "https://github.com/edgeandnode/toolshed", branch = "main" } [dev-dependencies] env_logger = "0.9.0" diff --git a/common/src/network_subgraph/mod.rs b/common/src/network_subgraph/mod.rs index 894a8bb9..6b8d852f 100644 --- a/common/src/network_subgraph/mod.rs +++ b/common/src/network_subgraph/mod.rs @@ -3,10 +3,10 @@ use std::sync::Arc; +use graphql::http::Response; use reqwest::{header, Client, Url}; use serde::de::Deserialize; use serde_json::Value; -use toolshed::graphql::http::Response; /// Network subgraph query wrapper /// diff --git a/service/Cargo.toml b/service/Cargo.toml index c0bb3a94..200135f0 100644 --- a/service/Cargo.toml +++ b/service/Cargo.toml @@ -10,15 +10,15 @@ license = "Apache-2.0" [dependencies] indexer-common = { path = "../common" } confy = "0.5.1" -ethers-core = "2.0.0" -ethers = "2.0.0" +ethers-core = "2.0.10" +ethers = "2.0.10" dotenvy = "0.15" log = "0.4.17" anyhow = "1.0.57" reqwest = "0.11.10" tokio = { version = "1", features = ["rt", 
"macros", "sync", "full"] } tracing = "0.1.34" -thiserror = "1.0.30" +thiserror = "1.0.49" serde = { version = "1.0", features = ["rc", "derive"] } serde_json = "1" axum = "0.5" @@ -47,7 +47,7 @@ sqlx = { version = "0.7.1", features = ["postgres", "runtime-tokio", "bigdecimal alloy-primitives = { version = "0.3.3", features = ["serde"] } alloy-sol-types = "0.3.2" lazy_static = "1.4.0" -toolshed = { git = "https://github.com/edgeandnode/toolshed", tag = "v0.2.2", features = ["graphql"] } +toolshed = { git = "https://github.com/edgeandnode/toolshed", branch = "main", features = ["graphql"] } [dev-dependencies] faux = "0.1.10" From b0dc6042fb18787085acabd54246728db83e3210 Mon Sep 17 00:00:00 2001 From: Jannis Pohlmann Date: Tue, 10 Oct 2023 14:25:12 +0200 Subject: [PATCH 3/4] fix: correct cost model query logic, better typing --- service/src/common/indexer_error.rs | 3 - service/src/common/indexer_management/mod.rs | 554 ++++++++++++------- service/src/server/routes/cost.rs | 45 +- 3 files changed, 394 insertions(+), 208 deletions(-) diff --git a/service/src/common/indexer_error.rs b/service/src/common/indexer_error.rs index a596ef90..3e77a741 100644 --- a/service/src/common/indexer_error.rs +++ b/service/src/common/indexer_error.rs @@ -85,7 +85,6 @@ pub enum IndexerErrorCode { IE073, IE074, IE075, - IE076, } impl fmt::Display for IndexerErrorCode { @@ -166,7 +165,6 @@ impl fmt::Display for IndexerErrorCode { IndexerErrorCode::IE073 => write!(f, "IE073"), IndexerErrorCode::IE074 => write!(f, "IE074"), IndexerErrorCode::IE075 => write!(f, "IE075"), - IndexerErrorCode::IE076 => write!(f, "IE076"), } } } @@ -251,7 +249,6 @@ impl IndexerErrorCode { Self::IE073 => "Failed to query subgraph features from indexing statuses endpoint", Self::IE074 => "Failed to resolve the release version", Self::IE075 => "Failed to parse response body to query string", - Self::IE076 => "Database read failed", } } diff --git a/service/src/common/indexer_management/mod.rs b/service/src/common/indexer_management/mod.rs index 00a4334a..3fd6708c 100644 --- a/service/src/common/indexer_management/mod.rs +++ b/service/src/common/indexer_management/mod.rs @@ -1,84 +1,130 @@ // Copyright 2023-, GraphOps and Semiotic Labs. // SPDX-License-Identifier: Apache-2.0 -use async_graphql::SimpleObject; +use std::{collections::HashSet, str::FromStr}; + use serde::{Deserialize, Serialize}; use serde_json::Value; -use sqlx::{FromRow, PgPool}; - -use super::indexer_error::{IndexerError, IndexerErrorCause, IndexerErrorCode}; +use sqlx::PgPool; +use toolshed::thegraph::{DeploymentId, DeploymentIdError}; + +/// Internal cost model representation as stored in the database. +/// +/// These can have "global" as the deployment ID. +#[derive(Debug, Clone)] +struct DbCostModel { + pub deployment: String, + pub model: Option, + pub variables: Option, +} -#[derive(Debug, FromRow, Clone, Serialize, Deserialize, SimpleObject)] +/// External representation of cost models. +/// +/// Here, any notion of "global" is gone and deployment IDs are valid deployment IDs. +#[derive(Debug, Clone, Serialize, Deserialize)] pub struct CostModel { - pub deployment: String, + pub deployment: DeploymentId, pub model: Option, pub variables: Option, } -/// Query postgres indexer management server's cost models -/// If specific deployments is provided, then global fallback is applied to all -/// deployments regardless of its presence in the database. 
Otherwise, all cost
-/// models are returned without merging fields with the global cost model
+impl TryFrom<DbCostModel> for CostModel {
+    type Error = DeploymentIdError;
+
+    fn try_from(db_model: DbCostModel) -> Result<Self, Self::Error> {
+        Ok(Self {
+            deployment: DeploymentId::from_str(&db_model.deployment)?,
+            model: db_model.model,
+            variables: db_model.variables,
+        })
+    }
+}
+
+impl From<CostModel> for DbCostModel {
+    fn from(model: CostModel) -> Self {
+        let deployment = model.deployment;
+        DbCostModel {
+            deployment: format!("{deployment:#x}"),
+            model: model.model,
+            variables: model.variables,
+        }
+    }
+}
+
+/// Query cost models from the database, merging the global cost model in
+/// whenever there is no cost model defined for a deployment.
 pub async fn cost_models(
     pool: &PgPool,
-    deployments: &[String],
-) -> Result<Vec<CostModel>, IndexerError> {
-    let deployment_ids = deployments.to_vec();
-    let models = if deployment_ids.is_empty() {
+    deployments: &[DeploymentId],
+) -> Result<Vec<CostModel>, anyhow::Error> {
+    let hex_ids = deployments
+        .iter()
+        .map(|d| format!("{d:#x}"))
+        .collect::<Vec<String>>();
+
+    let mut models = if deployments.is_empty() {
         sqlx::query_as!(
-            CostModel,
+            DbCostModel,
             r#"
-            SELECT deployment, model, variables
-            FROM "CostModels"
-            ORDER BY deployment ASC
-            "#
+            SELECT deployment, model, variables
+            FROM "CostModels"
+            WHERE deployment != 'global'
+            ORDER BY deployment ASC
+            "#
         )
         .fetch_all(pool)
-        .await
-        .map_err(|e| IndexerError::new(IndexerErrorCode::IE076, Some(IndexerErrorCause::new(e))))?
+        .await?
     } else {
         sqlx::query_as!(
-            CostModel,
+            DbCostModel,
             r#"
             SELECT deployment, model, variables
             FROM "CostModels"
             WHERE deployment = ANY($1)
+            AND deployment != 'global'
             ORDER BY deployment ASC
-            "#,
-            &deployment_ids
+            "#,
+            &hex_ids
         )
         .fetch_all(pool)
-        .await
-        .map_err(|e| IndexerError::new(IndexerErrorCode::IE076, Some(IndexerErrorCause::new(e))))?
-    };
-
-    // Merge deployment cost models with global cost model
-    let models = match (deployment_ids.is_empty(), global_cost_model(pool).await?) {
-        (false, Some(global)) => {
-            let m = deployment_ids
-                .iter()
-                .map(|d| {
-                    let m = models.iter().find(|&m| &m.deployment == d).map_or_else(
-                        || {
-                            merge_global(
-                                CostModel {
-                                    deployment: d.clone(),
-                                    model: None,
-                                    variables: None,
-                                },
-                                &global,
-                            )
-                        },
-                        |m| merge_global(m.clone(), &global),
-                    );
-                    m
-                })
-                .collect::<Vec<CostModel>>();
-
-            m
-        }
-        _ => models,
-    };
+    }
+    .into_iter()
+    .map(CostModel::try_from)
+    .collect::<Result<Vec<_>, _>>()?;
+
+    let deployments_with_models = models
+        .iter()
+        .map(|model| &model.deployment)
+        .collect::<HashSet<_>>();
+
+    let deployments_without_models = deployments
+        .iter()
+        .filter(|deployment| !deployments_with_models.contains(deployment))
+        .collect::<Vec<_>>();
+
+    // Query the global cost model
+    if let Some(global_model) = global_cost_model(pool).await? 
{
+        // For all deployments that have a cost model, merge it with the global one
+        models = models
+            .into_iter()
+            .map(|model| merge_global(model, &global_model))
+            .collect();
+
+        // Inject a cost model for all deployments that don't have one
+        models = models
+            .into_iter()
+            .chain(
+                deployments_without_models
+                    .into_iter()
+                    .map(|deployment| CostModel {
+                        deployment: deployment.to_owned(),
+                        model: global_model.model.clone(),
+                        variables: global_model.variables.clone(),
+                    }),
+            )
+            .collect();
+    }
 
     Ok(models)
 }
@@ -86,61 +132,59 @@ pub async fn cost_models(
 /// Make database query for a cost model indexed by deployment id
 pub async fn cost_model(
     pool: &PgPool,
-    deployment: &str,
-) -> Result<Option<CostModel>, IndexerError> {
+    deployment: &DeploymentId,
+) -> Result<Option<CostModel>, anyhow::Error> {
     let model = sqlx::query_as!(
-        CostModel,
+        DbCostModel,
         r#"
         SELECT deployment, model, variables
         FROM "CostModels"
         WHERE deployment = $1
+        AND deployment != 'global'
-        "#,
-        deployment
+        "#,
+        format!("{:#x}", deployment),
     )
     .fetch_optional(pool)
-    .await
-    .map_err(|e| IndexerError::new(IndexerErrorCode::IE076, Some(IndexerErrorCause::new(e))))?;
-
-    // Fallback with global cost model
-    let model = if let Some(global) = global_cost_model(pool).await? {
-        model.map_or_else(
-            || {
-                Some(merge_global(
-                    CostModel {
-                        deployment: deployment.into(),
-                        model: None,
-                        variables: None,
-                    },
-                    &global,
-                ))
-            },
-            |m| Some(merge_global(m, &global)),
-        )
-    } else {
-        model
-    };
-
-    Ok(model)
+    .await?
+    .map(CostModel::try_from)
+    .transpose()?;
+
+    let global_model = global_cost_model(pool).await?;
+
+    Ok(match (model, global_model) {
+        // If we have no global model, return whatever we can find for the deployment
+        (None, None) => None,
+        (Some(model), None) => Some(model),
+
+        // If we have a cost model and a global cost model, merge them
+        (Some(model), Some(global_model)) => Some(merge_global(model, &global_model)),
+
+        // If we have only a global model, use that
+        (None, Some(global_model)) => Some(CostModel {
+            deployment: deployment.to_owned(),
+            model: global_model.model,
+            variables: global_model.variables,
+        }),
+    })
 }
 
 /// Query global cost model
-pub async fn global_cost_model(pool: &PgPool) -> Result<Option<CostModel>, IndexerError> {
-    let model = sqlx::query_as!(
-        CostModel,
+async fn global_cost_model(pool: &PgPool) -> Result<Option<DbCostModel>, anyhow::Error> {
+    sqlx::query_as!(
+        DbCostModel,
         r#"
         SELECT deployment, model, variables
         FROM "CostModels"
         WHERE deployment = $1
-        "#,
+        "#,
         "global"
     )
     .fetch_optional(pool)
     .await
-    .map_err(|e| IndexerError::new(IndexerErrorCode::IE076, Some(IndexerErrorCause::new(e))))?;
-    Ok(model)
+    .map_err(Into::into)
 }
 
-fn merge_global(model: CostModel, global_model: &CostModel) -> CostModel {
+fn merge_global(model: CostModel, global_model: &DbCostModel) -> CostModel {
     CostModel {
         deployment: model.deployment,
         model: model.model.clone().or(global_model.model.clone()),
@@ -151,6 +195,8 @@ fn merge_global(model: CostModel, global_model: &CostModel) -> CostModel {
 
 #[cfg(test)]
 mod test {
+    use std::str::FromStr;
+
     use sqlx::PgPool;
 
     use super::*;
@@ -174,11 +220,28 @@ mod test {
 
     async fn add_cost_models(pool: &PgPool, models: Vec<CostModel>) {
         for model in models {
+            let deployment = model.deployment;
             sqlx::query!(
                 r#"
-                INSERT INTO "CostModels" (deployment, model)
-                VALUES ($1, $2);
-                "#,
-                model.deployment,
+                INSERT INTO "CostModels" (deployment, model)
+                VALUES ($1, $2);
+                "#,
+                format!("{deployment:#x}"),
                 model.model,
             )
             .execute(pool)
             .await
             .expect("Create test instance in db");
         }
     }
+
+    async fn add_db_cost_models(pool: 
&PgPool, models: Vec<DbCostModel>) {
+        for model in models {
+            sqlx::query!(
+                r#"
+                INSERT INTO "CostModels" (deployment, model)
+                VALUES ($1, $2);
+                "#,
+                model.deployment,
+                model.model,
+            )
+            .execute(pool)
+            .await
+            .expect("Create test instance in db");
+        }
+    }
 
-    fn global_cost_model() -> CostModel {
-        CostModel {
+    fn global_cost_model() -> DbCostModel {
+        DbCostModel {
             deployment: "global".to_string(),
             model: Some("default => 0.00001;".to_string()),
             variables: None,
         }
     }
 
-    fn simple_cost_models() -> Vec<CostModel> {
+    fn test_data() -> Vec<CostModel> {
         vec![
+            CostModel {
+                deployment: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+                    .parse()
+                    .unwrap(),
+                model: None,
+                variables: None,
+            },
             CostModel {
                 deployment: "0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea"
-                    .to_string(),
+                    .parse()
+                    .unwrap(),
                 model: Some("default => 0.00025;".to_string()),
                 variables: None,
             },
             CostModel {
-                deployment: "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
-                    .to_string(),
-                model: None,
+                deployment: "0xcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc"
+                    .parse()
+                    .unwrap(),
+                model: Some("default => 0.00012;".to_string()),
                 variables: None,
             },
         ]
     }
 
     #[sqlx::test]
     async fn success_cost_models(pool: PgPool) {
+        let test_models = test_data();
+        let test_deployments = test_models
+            .iter()
+            .map(|model| model.deployment)
+            .collect::<HashSet<_>>();
+
         setup_cost_models_table(&pool).await;
-        let expected_models = simple_cost_models();
-        add_cost_models(&pool, expected_models.clone()).await;
-        let res = cost_models(
-            &pool,
-            &["Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss".to_string()],
-        )
-        .await
-        .expect("Cost models query");
+        add_cost_models(&pool, test_models.clone()).await;
 
-        assert_eq!(res.len(), 1);
-        assert_eq!(
-            &res.first().unwrap().deployment,
-            &expected_models.first().unwrap().deployment
-        );
-        assert_eq!(
-            &res.first().unwrap().model,
-            &expected_models.first().unwrap().model
-        );
+        // First test: query without deployment filter
+        let models = cost_models(&pool, &[])
+            .await
+            .expect("cost models query without deployment filter");
 
-        let res = cost_models(
-            &pool,
-            &["0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea".to_string()],
-        )
-        .await
-        .expect("Cost models query");
+        // We expect as many models as we have in the test data
+        assert_eq!(models.len(), test_models.len());
 
-        assert_eq!(res.len(), 1);
-        assert_eq!(
-            &res.first().unwrap().deployment,
-            &expected_models.first().unwrap().deployment
-        );
-        assert_eq!(
-            &res.first().unwrap().model,
-            &expected_models.first().unwrap().model
-        );
+        // We expect models for all test deployments to be present and
+        // identical to the test data
+        for test_deployment in test_deployments.iter() {
+            let test_model = test_models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for test deployment in test data");
+
+            let model = models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for test deployment in query result");
+
+            assert_eq!(test_model.model, model.model);
+        }
+
+        // Second test: query with a deployment filter
+        let sample_deployments = vec![
+            test_models.get(0).unwrap().deployment,
+            test_models.get(1).unwrap().deployment,
+        ];
+        let models = cost_models(&pool, &sample_deployments)
+            .await
+            .expect("cost models query with deployment filter");
+
+        // Expect two cost models to be returned
+        assert_eq!(models.len(), sample_deployments.len());
+
+        // Expect both returned deployments to be 
+        for test_deployment in sample_deployments.iter() {
+            let test_model = test_models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for test deployment in test data");
+
+            let model = models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for test deployment in query result");
+
+            assert_eq!(test_model.model, model.model);
+        }
     }
 
     #[sqlx::test]
     async fn global_fallback_cost_models(pool: PgPool) {
-        let deployment_id =
-            "0xaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa".to_string();
+        let test_models = test_data();
+        let test_deployments = test_models
+            .iter()
+            .map(|model| model.deployment)
+            .collect::<Vec<_>>();
+        let global_model = global_cost_model();
+
         setup_cost_models_table(&pool).await;
-        add_cost_models(&pool, simple_cost_models()).await;
-        let global = global_cost_model();
-        add_cost_models(&pool, vec![global.clone()]).await;
-        let res = cost_models(&pool, &[])
+        add_cost_models(&pool, test_models.clone()).await;
+        add_db_cost_models(&pool, vec![global_model.clone()]).await;
+
+        // First test: fetch cost models without filtering by deployment
+        let models = cost_models(&pool, &[])
             .await
-            .expect("Cost models query without deployments filter");
-
-        assert_eq!(res.len(), 3);
-        let incomplete_model = res.iter().find(|m| m.deployment == deployment_id.clone());
-        assert!(incomplete_model.is_some());
-        assert_ne!(incomplete_model.unwrap().model, global.model);
-
-        let res = cost_models(
-            &pool,
-            &[
-                deployment_id.clone(),
-                "0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea".to_string(),
-            ],
-        )
-        .await
-        .expect("Cost models query without deployments filter");
+            .expect("cost models query without deployments filter");
 
-        assert_eq!(res.len(), 2);
-        let incomplete_model = res.iter().find(|m| m.deployment == deployment_id.clone());
-        assert!(incomplete_model.is_some());
-        assert_eq!(incomplete_model.unwrap().model, global.model);
+        // Since we've defined 3 cost models and did not provide a filter, we
+        // expect all of them to be returned, but not the global cost model
+        assert_eq!(models.len(), test_models.len());
 
-        let complete_model = res.iter().find(|m| {
-            m.deployment == *"0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea"
-        });
-        assert!(complete_model.is_some());
-        assert_ne!(complete_model.unwrap().model, global.model);
+        // Expect all test deployments to be present in the query result
+        for test_deployment in test_deployments.iter() {
+            let test_model = test_models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for deployment in test data");
 
-        let missing_deployment = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
-        let res = cost_models(&pool, &[missing_deployment.to_string()])
-            .await
-            .expect("Cost models query without deployments filter");
+            let model = models
+                .iter()
+                .find(|model| &model.deployment == test_deployment)
+                .expect("finding cost model for deployment in query result");
+
+            if test_model.model.is_some() {
+                // If the test model has a model definition, we expect that to be returned
+                assert_eq!(model.model, test_model.model);
+            } else {
+                // If the test model has no model definition, we expect the global
+                // model definition to be returned
+                assert_eq!(model.model, global_model.model);
+            }
+        }
+
+        // Second test: fetch cost models, filtering by the first two deployment IDs
+        let sample_deployments = vec![
+            test_models.get(0).unwrap().deployment,
+            test_models.get(1).unwrap().deployment,
+        ];
+        let models = cost_models(&pool, &sample_deployments).await
+            .expect("cost models query with deployments filter");
 
-        let missing_model = res.iter().find(|m| {
-            m.deployment == *"0xb5ddb473e202a7abba81803ad153fd93a9b18d07ab38a711f7c2bd79435e50d7"
-        });
-        assert!(missing_model.is_some());
-        assert_eq!(missing_model.unwrap().model, global.model);
+        // We've filtered by two deployment IDs and are expecting two cost models to be returned
+        assert_eq!(models.len(), sample_deployments.len());
+
+        for test_deployment in sample_deployments {
+            let test_model = test_models
+                .iter()
+                .find(|model| model.deployment == test_deployment)
+                .expect("finding cost model for deployment in test data");
+
+            let model = models
+                .iter()
+                .find(|model| model.deployment == test_deployment)
+                .expect("finding cost model for deployment in query result");
+
+            if test_model.model.is_some() {
+                // If the test model has a model definition, we expect that to be returned
+                assert_eq!(model.model, test_model.model);
+            } else {
+                // If the test model has no model definition, we expect the global
+                // model definition to be returned
+                assert_eq!(model.model, global_model.model);
+            }
+        }
+
+        // Third test: query for missing cost model
+        let missing_deployment =
+            DeploymentId::from_str("Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa").unwrap();
+        let models = cost_models(&pool, &[missing_deployment])
+            .await
+            .expect("cost models query for missing deployment");
+
+        // The deployment may be missing in the database but we have a global model
+        // and expect that to be returned, with the missing deployment ID
+        let missing_model = models
+            .iter()
+            .find(|m| m.deployment == missing_deployment)
+            .expect("finding missing deployment");
+        assert_eq!(missing_model.model, global_model.model);
     }
 
     #[sqlx::test]
     async fn success_cost_model(pool: PgPool) {
-        let deployment_id = "0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea";
-        let deployment_hash = "Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss".to_string();
         setup_cost_models_table(&pool).await;
-        add_cost_models(&pool, simple_cost_models()).await;
-        let res = cost_model(&pool, &deployment_hash)
+        add_cost_models(&pool, test_data()).await;
+
+        let deployment_id_from_bytes = DeploymentId(
+            "0xbd499f7673ca32ef4a642207a8bebdd0fb03888cf2678b298438e3a1ae5206ea"
+                .parse()
+                .unwrap(),
+        );
+        let deployment_id_from_hash =
+            DeploymentId::from_str("Qmb5Ysp5oCUXhLA8NmxmYKDAX2nCMnh7Vvb5uffb9n5vss").unwrap();
+
+        assert_eq!(deployment_id_from_bytes, deployment_id_from_hash);
+
+        let model = cost_model(&pool, &deployment_id_from_bytes)
             .await
-            .expect("Cost model query")
-            .expect("Cost model match deployment");
+            .expect("cost model query")
+            .expect("cost model for deployment");
 
-        assert_eq!(res.deployment, deployment_id.to_string());
+        assert_eq!(model.deployment, deployment_id_from_hash);
+        assert_eq!(model.model, Some("default => 0.00025;".to_string()));
     }
 
     #[sqlx::test]
     async fn global_fallback_cost_model(pool: PgPool) {
-        let deployment_hash = "Qmaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
-        setup_cost_models_table(&pool).await;
-        add_cost_models(&pool, simple_cost_models()).await;
+        let test_models = test_data();
+        let global_model = global_cost_model();
 
-        let res = cost_model(&pool, deployment_hash)
-            .await
-            .expect("Cost model query");
-
-        assert!(res.is_none());
-
-        let global = global_cost_model();
-        add_cost_models(&pool, vec![global.clone()]).await;
+        setup_cost_models_table(&pool).await;
+        add_cost_models(&pool, test_models.clone()).await;
+        add_db_cost_models(&pool, vec![global_model.clone()]).await;
+
+        // Test that the behavior is correct for existing deployments
+        for test_model in test_models {
+            let model = cost_model(&pool, &test_model.deployment)
+                .await
+                .expect("cost model query")
+                .expect("global cost model fallback");
+
+            assert_eq!(model.deployment, test_model.deployment);
+
+            if test_model.model.is_some() {
+                // If the test model has a model definition, we expect that to be returned
+                assert_eq!(model.model, test_model.model);
+            } else {
+                // If the test model has no model definition, we expect the global
+                // model definition to be returned
+                assert_eq!(model.model, global_model.model);
+            }
+        }
 
-        let res = cost_model(&pool, deployment_hash)
+        // Test that querying a non-existing deployment returns the global cost model
+        let missing_deployment =
+            DeploymentId::from_str("Qmnononononononononononononononononononononono").unwrap();
+        let model = cost_model(&pool, &missing_deployment)
             .await
-            .expect("Cost model query")
-            .expect("Global cost model fallback");
-
-        assert_eq!(res.model, global.model);
-        assert_eq!(&res.deployment, deployment_hash);
+            .expect("cost model query")
+            .expect("global cost model fallback");
+        assert_eq!(model.deployment, missing_deployment);
+        assert_eq!(model.model, global_model.model);
     }
 }
diff --git a/service/src/server/routes/cost.rs b/service/src/server/routes/cost.rs
index fd40a0ed..caeb39ad 100644
--- a/service/src/server/routes/cost.rs
+++ b/service/src/server/routes/cost.rs
@@ -1,18 +1,37 @@
 // Copyright 2023-, GraphOps and Semiotic Labs.
 // SPDX-License-Identifier: Apache-2.0
 
-use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema};
+use std::str::FromStr;
+
+use async_graphql::{Context, EmptyMutation, EmptySubscription, Object, Schema, SimpleObject};
 use async_graphql_axum::{GraphQLRequest, GraphQLResponse};
 use axum::extract::Extension;
+use serde::{Deserialize, Serialize};
+use serde_json::Value;
+use toolshed::thegraph::DeploymentId;
 
 use crate::{
-    common::{
-        indexer_error::IndexerError,
-        indexer_management::{self, CostModel},
-    },
+    common::indexer_management::{self, CostModel},
     server::ServerOptions,
 };
 
+#[derive(Clone, Debug, Serialize, Deserialize, SimpleObject)]
+pub struct GraphQlCostModel {
+    pub deployment: String,
+    pub model: Option<String>,
+    pub variables: Option<Value>,
+}
+
+impl From<CostModel> for GraphQlCostModel {
+    fn from(model: CostModel) -> Self {
+        Self {
+            deployment: model.deployment.to_string(),
+            model: model.model,
+            variables: model.variables,
+        }
+    }
+}
+
 pub type CostSchema = Schema<QueryRoot, EmptyMutation, EmptySubscription>;
 
 #[derive(Default)]
@@ -24,18 +43,26 @@ impl QueryRoot {
         &self,
         ctx: &Context<'_>,
         deployments: Vec<String>,
-    ) -> Result<Vec<CostModel>, IndexerError> {
+    ) -> Result<Vec<GraphQlCostModel>, anyhow::Error> {
+        let deployment_ids = deployments
+            .into_iter()
+            .map(|s| DeploymentId::from_str(&s))
+            .collect::<Result<Vec<DeploymentId>, _>>()?;
         let pool = &ctx.data_unchecked::<ServerOptions>().indexer_management_db;
-        indexer_management::cost_models(pool, &deployments).await
+        let cost_models = indexer_management::cost_models(pool, &deployment_ids).await?;
+        Ok(cost_models.into_iter().map(|m| m.into()).collect())
     }
 
     async fn cost_model(
         &self,
         ctx: &Context<'_>,
         deployment: String,
-    ) -> Result<Option<CostModel>, IndexerError> {
+    ) -> Result<Option<GraphQlCostModel>, anyhow::Error> {
+        let deployment_id = DeploymentId::from_str(&deployment)?;
         let pool = &ctx.data_unchecked::<ServerOptions>().indexer_management_db;
-        indexer_management::cost_model(pool, &deployment).await
+        indexer_management::cost_model(pool, &deployment_id)
+            .await
+            .map(|model_opt| model_opt.map(GraphQlCostModel::from))
     }
 }

From cff89d43479ff4640d87aa643050a48a4a6e1fb4 Mon Sep 17 00:00:00 2001
From: Jannis Pohlmann
Date: Tue, 10 Oct 2023 14:26:06 +0200
Subject: [PATCH 4/4] chore: update files for sqlx offline mode

---
 ...edc4204bbbd32aac6f7da7e99fb501ca5cc14.json | 32 +++++++++++++++++++
 ...5685daaed836ef4c927afd8ae7e2804d4a9fd.json | 15 ---------
 ...1732bb3638a3a4f942a665cf1fd38eb70c2d.json} |  8 +++--
 ...51e43b6b9ffe9834df85a62707d3a2d051b4.json} |  4 +--
 ...651cfd94b4b82f26baf0755efa80e6045c0a.json} |  4 +--
 ...893b4f7209c985c367215b2eed25adc78c462.json | 15 +++++++++
 6 files changed, 56 insertions(+), 22 deletions(-)
 create mode 100644 .sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json
 delete mode 100644 .sqlx/query-88901c4779fdd2c4653a2f0544a5685daaed836ef4c927afd8ae7e2804d4a9fd.json
 rename .sqlx/{query-7a3adab989a9552349dea7dbd52bcbdb40e034ee610c786a2111c493939e7e94.json => query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json} (70%)
 rename .sqlx/{query-a3b0fb77cb7760254342dac981bc2f9c19f96fb5748629ebf0498d3820179d97.json => query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json} (77%)
 rename .sqlx/{query-1ae5d2aa752bf2e9038455d7c643bba03daa9e26c7ad23f48f9959a120e28a84.json => query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json} (79%)
 create mode 100644 .sqlx/query-ef6affb9039ad19a69f4a5116d7893b4f7209c985c367215b2eed25adc78c462.json

diff --git a/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json b/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json
new file mode 100644
index 00000000..2b0cde21
--- /dev/null
+++ b/.sqlx/query-842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14.json
@@ -0,0 +1,32 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment != 'global'\n        ORDER BY deployment ASC\n    ",
+  "describe": {
+    "columns": [
+      {
+        "ordinal": 0,
+        "name": "deployment",
+        "type_info": "Varchar"
+      },
+      {
+        "ordinal": 1,
+        "name": "model",
+        "type_info": "Text"
+      },
+      {
+        "ordinal": 2,
+        "name": "variables",
+        "type_info": "Jsonb"
+      }
+    ],
+    "parameters": {
+      "Left": []
+    },
+    "nullable": [
+      false,
+      true,
+      true
+    ]
+  },
+  "hash": "842bde7fba1c7652b7cfc2dc568edc4204bbbd32aac6f7da7e99fb501ca5cc14"
+}
diff --git a/.sqlx/query-88901c4779fdd2c4653a2f0544a5685daaed836ef4c927afd8ae7e2804d4a9fd.json b/.sqlx/query-88901c4779fdd2c4653a2f0544a5685daaed836ef4c927afd8ae7e2804d4a9fd.json
deleted file mode 100644
index 3e1e0bb1..00000000
--- a/.sqlx/query-88901c4779fdd2c4653a2f0544a5685daaed836ef4c927afd8ae7e2804d4a9fd.json
+++ /dev/null
@@ -1,15 +0,0 @@
-{
-  "db_name": "PostgreSQL",
-  "query": "\n            INSERT INTO \"CostModels\" (deployment, model)\n            VALUES ($1, $2);\n        ",
-  "describe": {
-    "columns": [],
-    "parameters": {
-      "Left": [
-        "Varchar",
-        "Text"
-      ]
-    },
-    "nullable": []
-  },
-  "hash": "88901c4779fdd2c4653a2f0544a5685daaed836ef4c927afd8ae7e2804d4a9fd"
-}
diff --git a/.sqlx/query-7a3adab989a9552349dea7dbd52bcbdb40e034ee610c786a2111c493939e7e94.json b/.sqlx/query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json
index 6b00cac1..7dbd0863 100644
--- a/.sqlx/query-7a3adab989a9552349dea7dbd52bcbdb40e034ee610c786a2111c493939e7e94.json
+++ b/.sqlx/query-b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        ORDER BY deployment ASC\n    ",
+  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment = $1\n        AND deployment != 'global'\n        ",
   "describe": {
     "columns": [
       {
@@ -20,7 +20,9 @@
       }
     ],
     "parameters": {
-      "Left": []
+      "Left": [
+        "Text"
+      ]
     },
     "nullable": [
       false,
@@ -28,5 +30,5 @@
       true
     ]
   },
-  "hash": "7a3adab989a9552349dea7dbd52bcbdb40e034ee610c786a2111c493939e7e94"
+  "hash": "b54b1069daf03a377a0e7c09c9aa1732bb3638a3a4f942a665cf1fd38eb70c2d"
 }
diff --git a/.sqlx/query-a3b0fb77cb7760254342dac981bc2f9c19f96fb5748629ebf0498d3820179d97.json b/.sqlx/query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json
index 2b11d524..6b2da69c 100644
--- a/.sqlx/query-a3b0fb77cb7760254342dac981bc2f9c19f96fb5748629ebf0498d3820179d97.json
+++ b/.sqlx/query-d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment = ANY($1)\n        ORDER BY deployment ASC\n    ",
+  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment = ANY($1)\n        AND deployment != 'global'\n        ORDER BY deployment ASC\n    ",
   "describe": {
     "columns": [
       {
@@ -30,5 +30,5 @@
       true
     ]
   },
-  "hash": "a3b0fb77cb7760254342dac981bc2f9c19f96fb5748629ebf0498d3820179d97"
+  "hash": "d93dd26d7221c5e1ae15a919a2a651e43b6b9ffe9834df85a62707d3a2d051b4"
 }
diff --git a/.sqlx/query-1ae5d2aa752bf2e9038455d7c643bba03daa9e26c7ad23f48f9959a120e28a84.json b/.sqlx/query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json
index ca7766a0..61996727 100644
--- a/.sqlx/query-1ae5d2aa752bf2e9038455d7c643bba03daa9e26c7ad23f48f9959a120e28a84.json
+++ b/.sqlx/query-e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a.json
@@ -1,6 +1,6 @@
 {
   "db_name": "PostgreSQL",
-  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment = $1\n    ",
+  "query": "\n        SELECT deployment, model, variables\n        FROM \"CostModels\"\n        WHERE deployment = $1\n        ",
   "describe": {
     "columns": [
       {
@@ -30,5 +30,5 @@
       true
     ]
   },
-  "hash": "1ae5d2aa752bf2e9038455d7c643bba03daa9e26c7ad23f48f9959a120e28a84"
+  "hash": "e14503b633fc673b65448e70c204651cfd94b4b82f26baf0755efa80e6045c0a"
 }
diff --git a/.sqlx/query-ef6affb9039ad19a69f4a5116d7893b4f7209c985c367215b2eed25adc78c462.json b/.sqlx/query-ef6affb9039ad19a69f4a5116d7893b4f7209c985c367215b2eed25adc78c462.json
new file mode 100644
index 00000000..0c8cfd91
--- /dev/null
+++ b/.sqlx/query-ef6affb9039ad19a69f4a5116d7893b4f7209c985c367215b2eed25adc78c462.json
@@ -0,0 +1,15 @@
+{
+  "db_name": "PostgreSQL",
+  "query": "\n                INSERT INTO \"CostModels\" (deployment, model)\n                VALUES ($1, $2);\n                ",
+  "describe": {
+    "columns": [],
+    "parameters": {
+      "Left": [
+        "Varchar",
+        "Text"
+      ]
+    },
+    "nullable": []
+  },
"ef6affb9039ad19a69f4a5116d7893b4f7209c985c367215b2eed25adc78c462" +}