diff --git a/Cargo.toml b/Cargo.toml index ddc78f5..a9c2c77 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -16,3 +16,4 @@ tokio = { version = "1.28.1", features = ["macros", "rt-multi-thread"] } toml = "0.7.4" tower-http = { version = "0.4.0", features = ["cors"] } chrono = "0.4.24" +reqwest = "0.11.20" diff --git a/Dockerfile b/Dockerfile index f82c8e5..07445f1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -10,14 +10,21 @@ COPY Cargo.toml Cargo.lock config.toml ./ # Copy the source code COPY src ./src -# Build the application in release mode -RUN cargo build --release +# Accept a build argument for the build mode +ARG BUILD_MODE=release -# Expose the port your application uses (replace 8083 with your app's port) +# Build the application based on the build mode +RUN if [ "$BUILD_MODE" = "debug" ]; then \ + cargo build; \ +else \ + cargo build --release; \ +fi + +# Expose the port your application uses EXPOSE 8080 # Set the unbuffered environment variable ENV RUST_BACKTRACE "1" -# Run the binary -CMD ["./target/release/starknetid_server"] +ENV BUILD_MODE=${BUILD_MODE} +CMD if [ "$BUILD_MODE" = "debug" ]; then ./target/debug/starknetid_server; else ./target/release/starknetid_server; fi diff --git a/config.template.toml b/config.template.toml index 6aefaab..5bd4d7e 100644 --- a/config.template.toml +++ b/config.template.toml @@ -15,3 +15,7 @@ naming = "0xXXXXXXXXXXXX" verifier = "0xXXXXXXXXXXXX" old_verifier = "0xXXXXXXXXXXXX" pop_verifier = "0xXXXXXXXXXXXX" + +[starkscan] +api_url = "https://api-testnet.starkscan.co/api/v0/" +api_key = "xxxxxx" \ No newline at end of file diff --git a/src/config.rs b/src/config.rs index 68d300e..40f0d0c 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,27 +4,27 @@ use std::env; use std::fs; macro_rules! 
pub_struct { - ($name:ident {$($field:ident: $t:ty,)*}) => { - #[derive(Clone, Deserialize)] + ($($derive:path),*; $name:ident {$($field:ident: $t:ty),* $(,)?}) => { + #[derive($($derive),*)] pub struct $name { $(pub $field: $t),* } } } -pub_struct!(Server { port: u16, }); +pub_struct!(Clone, Deserialize; Server { port: u16 }); -pub_struct!(Databases { +pub_struct!(Clone, Deserialize; Databases { starknetid: Database, sales: Database, }); -pub_struct!(Database { +pub_struct!(Clone, Deserialize; Database { name: String, connection_string: String, }); -pub_struct!(Contracts { +pub_struct!(Clone, Deserialize; Contracts { starknetid: FieldElement, naming: FieldElement, verifier: FieldElement, @@ -32,10 +32,16 @@ pub_struct!(Contracts { pop_verifier: FieldElement, }); -pub_struct!(Config { +pub_struct!(Clone, Deserialize; Starkscan { + api_url: String, + api_key: String, +}); + +pub_struct!(Clone, Deserialize; Config { server: Server, databases: Databases, contracts: Contracts, + starkscan: Starkscan, }); pub fn load() -> Config { diff --git a/src/endpoints/mod.rs b/src/endpoints/mod.rs index c547c57..fb4b532 100644 --- a/src/endpoints/mod.rs +++ b/src/endpoints/mod.rs @@ -10,4 +10,7 @@ pub mod domain_to_data; pub mod galxe; pub mod id_to_data; pub mod referral; +pub mod renewal; +pub mod starkscan; +pub mod stats; pub mod uri; diff --git a/src/endpoints/renewal/get_renewal_data.rs b/src/endpoints/renewal/get_renewal_data.rs new file mode 100644 index 0000000..5842cf8 --- /dev/null +++ b/src/endpoints/renewal/get_renewal_data.rs @@ -0,0 +1,67 @@ +use crate::{ + models::AppState, + utils::{get_error, to_hex}, +}; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::{IntoResponse, Json}, +}; +use futures::StreamExt; +use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use starknet::core::types::FieldElement; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct StarknetIdData { + starknet_id: String, +} + 
+#[derive(Deserialize)] +pub struct StarknetIdQuery { + addr: FieldElement, + domain: String, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let renew_collection = state + .starknetid_db + .collection::("auto_renew_flows"); + + let documents = renew_collection + .find( + doc! { + "renewer_address": to_hex(&query.addr), + "domain": query.domain, + "_cursor.to": null, + }, + None, + ) + .await; + + match documents { + Ok(mut cursor) => { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=30")); + + if let Some(result) = cursor.next().await { + match result { + Ok(res) => { + let mut res = res; + res.remove("_id"); + res.remove("_cursor"); + (StatusCode::OK, headers, Json(res)).into_response() + } + Err(e) => get_error(format!("Error while processing the document: {:?}", e)), + } + } else { + get_error("no results founds".to_string()) + } + } + Err(_) => get_error("Error while fetching from database".to_string()), + } +} diff --git a/src/endpoints/renewal/mod.rs b/src/endpoints/renewal/mod.rs new file mode 100644 index 0000000..f47c9fd --- /dev/null +++ b/src/endpoints/renewal/mod.rs @@ -0,0 +1 @@ +pub mod get_renewal_data; diff --git a/src/endpoints/starkscan/fetch_nfts.rs b/src/endpoints/starkscan/fetch_nfts.rs new file mode 100644 index 0000000..331ad2b --- /dev/null +++ b/src/endpoints/starkscan/fetch_nfts.rs @@ -0,0 +1,84 @@ +use crate::{ + models::AppState, + utils::{get_error, to_hex}, +}; +use axum::{ + extract::{Query, State}, + http::StatusCode, + response::{IntoResponse, Json}, +}; +use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use serde_json::Value; +use starknet::core::types::FieldElement; +use std::sync::Arc; + +#[derive(Deserialize)] +pub struct FetchNftsQuery { + addr: FieldElement, + next_url: Option, +} + +#[derive(Serialize, Deserialize, Debug)] +pub struct StarkscanApiResult { + data: Vec, + next_url: Option, +} + 
+#[derive(Serialize, Deserialize, Debug)] +pub struct StarkscanNftProps { + animation_url: Option, + attributes: Option, + contract_address: String, + description: Option, + external_url: Option, + image_url: Option, + image_medium_url: Option, + image_small_url: Option, + minted_at_transaction_hash: Option, + minted_by_address: Option, + token_id: String, + name: Option, + nft_id: Option, + token_uri: Option, + minted_at_timestamp: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let url = if let Some(next_url) = &query.next_url { + next_url.clone() + } else { + format!( + "{}nfts?owner_address={}", + state.conf.starkscan.api_url, + to_hex(&query.addr) + ) + }; + + let client = reqwest::Client::new(); + match client + .get(&url) + .header("accept", "application/json") + .header("x-api-key", state.conf.starkscan.api_key.clone()) + .send() + .await + { + Ok(response) => match response.text().await { + Ok(text) => match serde_json::from_str::(&text) { + Ok(res) => (StatusCode::OK, Json(res)).into_response(), + Err(e) => get_error(format!( + "Failed to deserialize result from Starkscan API: {} for response: {}", + e, text + )), + }, + Err(e) => get_error(format!( + "Failed to get JSON response while fetching user NFT data: {}", + e + )), + }, + Err(e) => get_error(format!("Failed to fetch user NFTs from API: {}", e)), + } +} diff --git a/src/endpoints/starkscan/mod.rs b/src/endpoints/starkscan/mod.rs new file mode 100644 index 0000000..1c7247c --- /dev/null +++ b/src/endpoints/starkscan/mod.rs @@ -0,0 +1 @@ +pub mod fetch_nfts; diff --git a/src/endpoints/stats/count_addrs.rs b/src/endpoints/stats/count_addrs.rs new file mode 100644 index 0000000..5cb5247 --- /dev/null +++ b/src/endpoints/stats/count_addrs.rs @@ -0,0 +1,61 @@ +use crate::{models::AppState, utils::get_error}; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use 
futures::StreamExt; +use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct CountAddrsData { + count: i32, +} + +#[derive(Deserialize)] +pub struct CountAddrsQuery { + since: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state + .starknetid_db + .collection::("domains"); + let aggregate_cursor = domain_collection + .aggregate( + vec![ + doc! { "$match": { "_cursor.to": null, "creation_date": { "$gte": query.since } }}, + doc! { "$group": { "_id": "$legacy_address" }}, + doc! { "$count": "total" }, + ], + None, + ) + .await; + + match aggregate_cursor { + Ok(mut cursor) => { + if let Some(result) = cursor.next().await { + match result { + Ok(doc_) => { + let count = doc_.get_i32("total").unwrap_or(0); + let response_data = CountAddrsData { count }; + (StatusCode::OK, headers, Json(response_data)).into_response() + } + Err(e) => get_error(format!("Error while processing the document: {:?}", e)), + } + } else { + get_error("No documents found".to_string()) + } + } + Err(e) => get_error(format!("Error while fetching from database: {:?}", e)), + } +} diff --git a/src/endpoints/stats/count_club_domains.rs b/src/endpoints/stats/count_club_domains.rs new file mode 100644 index 0000000..3598c6f --- /dev/null +++ b/src/endpoints/stats/count_club_domains.rs @@ -0,0 +1,204 @@ +use crate::models::AppState; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use futures::TryStreamExt; +use mongodb::bson::{self, doc}; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use std::collections::HashMap; + +#[derive(Serialize)] +pub struct CountClubDomainsData { + club: String, + count: i32, +} + +#[derive(Deserialize)] +pub struct 
CountClubDomainsQuery { + since: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state.starknetid_db.collection::("domains"); + let subdomain_collection = state + .starknetid_db + .collection::("custom_resolutions"); + + let subdomain_output = subdomain_collection + .aggregate( + vec![ + doc! { + "$match": { + "_cursor.to": null, + // todo: uncomment when there is a creation_date in the collection custom_resolutions + // "creation_date": { + // "$gte": query.since, + // } + } + }, + doc! { + "$group": { + "_id": { + "$cond": [ + { + "$eq": ["$resolver", "0x0660b2cd3c93528d4edf790610404414ba3f03e0d45c814d686d628583cb34de"] + }, + "braavos", + { + "$cond": [ + { + "$eq": ["$resolver", "0x4942ebdc9fc996a42adb4a825e9070737fe68cef32a64a616ba5528d457812e"] + }, + "xplorer", + "none" + ] + } + ] + }, + "count": { + "$sum": 1 + } + } + }, + doc! { + "$project": { + "_id": 0, + "club": "$_id", + "count": "$count" + } + }, + ], + None, + ) + .await + .unwrap() + .try_collect::>() + .await + .unwrap(); + + let db_output = domain_collection.aggregate(vec![ + doc! { + "$match": { + "_cursor.to": null, + "creation_date": { + "$gte": query.since, + } + } + }, + doc! 
{ + "$group": { + "_id": { + "$cond": [ + {"$regexMatch": {"input": "$domain", "regex": r"^.\.stark$"}}, + "single_letter", + { "$cond": [ + {"$regexMatch": {"input": "$domain", "regex": r"^\d{2}\.stark$"}}, + "99", + { "$cond": [ + {"$regexMatch": {"input": "$domain", "regex": r"^.{2}\.stark$"}}, + "two_letters", + {"$cond": [ + { "$regexMatch": {"input": "$domain", "regex": r"^\d{3}\.stark$"}}, + "999", + {"$cond": [ + {"$regexMatch": { "input": "$domain", "regex": r"^.{3}\.stark$"}}, + "three_letters", + {"$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^\d{4}\.stark$" }}, + "10k", + {"$cond": [ + {"$regexMatch": {"input": "$domain", "regex": r"^.{4}\.stark$"}}, + "four_letters", + {"$cond": [ + { "$regexMatch": {"input": "$domain", "regex": r"^.*\.vip\.stark$"}}, + "og", + {"$cond": [ + {"$regexMatch": {"input": "$domain", "regex": r"^.*\.everai\.stark$"}}, + "everai", + { "$cond": [ + {"$regexMatch": { "input": "$domain","regex": r"^.*\.onsheet\.stark$" }}, + "onsheet", + "none", + ]}, + ]}, + ]}, + ]}, + ]} + ]}, + ]}, + ]}, + ]}, + ], + }, + "count": { + "$sum": 1 + } + } + }, + doc! 
{ + "$project": { + "_id": 0, + "club": "$_id", + "count": "$count" + } + } + ], None).await.unwrap().try_collect::>().await.unwrap(); + + let mut count_99 = 0; + let mut count_999 = 0; + let mut count_10k = 0; + + let mut output: Vec> = Vec::new(); + let mut output_map: HashMap = HashMap::new(); + + for doc in &db_output { + if let Ok(club) = doc.get_str("club") { + match club { + "99" => count_99 = doc.get_i32("count").unwrap_or_default(), + "999" => count_999 = doc.get_i32("count").unwrap_or_default(), + "10k" => count_10k = doc.get_i32("count").unwrap_or_default(), + _ => (), + } + } + } + + for doc in db_output { + if let Ok(club) = doc.get_str("club") { + match club { + "two_letters" => { + output_map.insert(club.to_string(), doc.get_i32("count").unwrap_or_default() + count_99); + } + "three_letters" => { + output_map.insert(club.to_string(), doc.get_i32("count").unwrap_or_default() + count_999); + } + "four_letters" => { + output_map.insert(club.to_string(), doc.get_i32("count").unwrap_or_default() + count_10k); + } + _ => { + output_map.insert(club.to_string(), doc.get_i32("count").unwrap_or_default()); + } + } + } + output.push(output_map.clone()); + output_map.clear(); + } + + for doc in subdomain_output { + output_map.insert(doc.get_str("club").unwrap_or_default().to_string(), doc.get_i32("count").unwrap_or_default()); + output_map.insert(doc.get_str("club").unwrap_or_default().to_string(), doc.get_i32("count").unwrap_or_default()); + output.push(output_map.clone()); + output_map.clear(); + } + + (StatusCode::OK, headers, Json(output)).into_response() +} diff --git a/src/endpoints/stats/count_created.rs b/src/endpoints/stats/count_created.rs new file mode 100644 index 0000000..d268292 --- /dev/null +++ b/src/endpoints/stats/count_created.rs @@ -0,0 +1,104 @@ +use crate::{models::AppState, utils::get_error}; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use futures::StreamExt; 
+use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct CountCreatedData { + from: i64, + count: i32, +} + +#[derive(Deserialize)] +pub struct CountCreatedQuery { + begin: i64, + end: i64, + segments: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let begin_time = query.begin; + let end_time = query.end; + let delta_time = ((end_time as f64 - begin_time as f64) / query.segments as f64).round() as i64; + + if delta_time > 3600 { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state.starknetid_db.collection::("domains"); + + let pipeline = vec![ + doc! { + "$match": { + "_cursor.to": null, + "creation_date": { + "$gte": begin_time, + "$lte": end_time + } + } + }, + doc! { + "$group": { + "_id": { + "$floor": { + "$sum": [ + { + "$subtract": [ + { + "$subtract": ["$creation_date", begin_time] + }, + { + "$mod": [ + { + "$subtract": ["$creation_date", begin_time] + }, + delta_time + ] + } + ] + }, + begin_time + ] + } + }, + "count": { + "$sum": 1 + } + } + }, + doc! 
{ + "$project": { + "_id": 0, + "from": "$_id", + "count": "$count" + } + }, + ]; + + let cursor = domain_collection.aggregate(pipeline, None).await.unwrap(); + let result: Vec = cursor + .map(|doc| { + let doc = doc.unwrap(); + let from: i64 = doc.get_i64("from").unwrap(); + let count = doc.get_i32("count").unwrap(); + + CountCreatedData { from, count } + }) + .collect::>() + .await; + + (StatusCode::OK, headers, Json(result)).into_response() + } else { + get_error("delta must be greater than 3600 seconds".to_string()) + } +} diff --git a/src/endpoints/stats/count_domains.rs b/src/endpoints/stats/count_domains.rs new file mode 100644 index 0000000..5396480 --- /dev/null +++ b/src/endpoints/stats/count_domains.rs @@ -0,0 +1,45 @@ +use crate::{models::AppState, utils::get_error}; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct CountDomainsData { + count: u64, +} + +#[derive(Deserialize)] +pub struct CountDomainsQuery { + since: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state.starknetid_db.collection::("domains"); + let filter = doc! 
{ + "expiry": { "$gte": chrono::Utc::now().timestamp() }, + "creation_date": { "$gte": query.since }, + "_cursor.to": { "$eq": null }, + }; + + let total = domain_collection.count_documents(filter, None).await; + + match total { + Ok(count) => { + let response_data = CountDomainsData { count }; + (StatusCode::OK, headers, Json(response_data)).into_response() + } + Err(e) => get_error(format!("Error while fetching from database: {:?}", e)), + } +} diff --git a/src/endpoints/stats/count_ids.rs b/src/endpoints/stats/count_ids.rs new file mode 100644 index 0000000..a03429d --- /dev/null +++ b/src/endpoints/stats/count_ids.rs @@ -0,0 +1,47 @@ +use crate::{models::AppState, utils::get_error}; +use axum::{ + extract::{Query, State}, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use mongodb::bson::doc; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct CountDomainsData { + count: u64, +} + +#[derive(Deserialize)] +pub struct CountDomainsQuery { + since: i64, +} + +pub async fn handler( + State(state): State>, + Query(query): Query, +) -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state + .starknetid_db + .collection::("domains"); + let filter = doc! 
{ + "expiry": { "$gte": chrono::Utc::now().timestamp() }, + "creation_date": { "$gte": query.since }, + "_cursor.to": { "$eq": null }, + }; + + let total = domain_collection.count_documents(filter, None).await; + + match total { + Ok(count) => { + let response_data = CountDomainsData { count }; + (StatusCode::OK, headers, Json(response_data)).into_response() + } + Err(e) => get_error(format!("Error while fetching from database: {:?}", e)), + } +} diff --git a/src/endpoints/stats/expired_club_domains.rs b/src/endpoints/stats/expired_club_domains.rs new file mode 100644 index 0000000..ad03394 --- /dev/null +++ b/src/endpoints/stats/expired_club_domains.rs @@ -0,0 +1,100 @@ +use crate::{models::AppState, utils::get_error}; +use axum::{ + extract::State, + http::{HeaderMap, HeaderValue, StatusCode}, + response::IntoResponse, + Json, +}; +use futures::StreamExt; +use mongodb::{bson::doc, options::AggregateOptions}; +use serde::Serialize; +use std::sync::Arc; + +#[derive(Serialize)] +pub struct CountClubDomainsData { + club: String, + count: i32, +} + +pub async fn handler(State(state): State>) -> impl IntoResponse { + let mut headers = HeaderMap::new(); + headers.insert("Cache-Control", HeaderValue::from_static("max-age=60")); + + let domain_collection = state + .starknetid_db + .collection::("domains"); + let current = chrono::Utc::now().timestamp(); + + let pipeline = vec![ + doc! { + "$match": { + "_cursor.to": null, + "expiry": { + "$lte": current, + } + } + }, + doc! 
{ + "$project": { + "domain": "$domain", + "club": { + "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^.\.stark$" }}, + "single_letter", + { "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^\d{2}\.stark$" }}, + "99", + { "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^.{2}\.stark$" }}, + "two_letters", + { "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^\d{3}\.stark$" }}, + "999", + { "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^.{3}\.stark$" }}, + "three_letters", + { "$cond": [ + { "$regexMatch": { "input": "$domain", "regex": r"^\d{4}\.stark" }}, + "10k", + "none" + ]} + ]} + ]} + ]} + ]} + ]} + } + }, + doc! { + "$match": { + "club": { "$ne": "none" } + } + }, + ]; + + let options = AggregateOptions::builder().build(); + let aggregate_cursor = domain_collection.aggregate(pipeline, options).await; + + match aggregate_cursor { + Ok(mut cursor) => { + let mut output = Vec::new(); + while let Some(result) = cursor.next().await { + match result { + Ok(doc) => { + if let Ok(domain) = doc.get_str("domain") { + if let Ok(club) = doc.get_str("club") { + output.push(doc! 
{ "domain": domain, "club": club }); + } + } + } + Err(e) => println!("Error: {}", e), + } + } + if output.is_empty() { + return get_error("No documents found".to_string()); + } + (StatusCode::OK, headers, Json(output)).into_response() + } + Err(e) => get_error(format!("Error while fetching from database: {:?}", e)), + } +} diff --git a/src/endpoints/stats/mod.rs b/src/endpoints/stats/mod.rs new file mode 100644 index 0000000..4e7c8f8 --- /dev/null +++ b/src/endpoints/stats/mod.rs @@ -0,0 +1,6 @@ +pub mod count_addrs; +pub mod count_club_domains; +pub mod count_created; +pub mod count_domains; +pub mod count_ids; +pub mod expired_club_domains; diff --git a/src/main.rs b/src/main.rs index 2482c72..51560ce 100644 --- a/src/main.rs +++ b/src/main.rs @@ -1,3 +1,5 @@ +#![recursion_limit = "256"] + mod config; mod endpoints; mod models; @@ -92,6 +94,38 @@ async fn main() { "/referral/click_count", get(endpoints::referral::click_count::handler), ) + .route( + "/stats/count_addrs", + get(endpoints::stats::count_addrs::handler), + ) + .route( + "/stats/count_club_domains", + get(endpoints::stats::count_club_domains::handler), + ) + .route( + "/stats/count_domains", + get(endpoints::stats::count_domains::handler), + ) + .route( + "/stats/count_ids", + get(endpoints::stats::count_ids::handler), + ) + .route( + "/stats/count_created", + get(endpoints::stats::count_created::handler), + ) + .route( + "/stats/expired_club_domains", + get(endpoints::stats::expired_club_domains::handler), + ) + .route( + "/starkscan/fetch_nfts", + get(endpoints::starkscan::fetch_nfts::handler), + ) + .route( + "/renewal/get_renewal_data", + get(endpoints::renewal::get_renewal_data::handler), + ) .route("/galxe/verify", post(endpoints::galxe::verify::handler)) .with_state(shared_state) .layer(cors);