From 434411e44a53ab302d7d2471ab9b4c9a4957b907 Mon Sep 17 00:00:00 2001 From: Dmitry Balashov <43530070+0x009922@users.noreply.github.com> Date: Fri, 8 Dec 2023 13:58:13 +0700 Subject: [PATCH] [docs]: remove auto-generated config reference Signed-off-by: Dmitry Balashov <43530070+0x009922@users.noreply.github.com> --- .github/workflows/iroha2-dev-pr-label.yml | 3 +- .github/workflows/iroha2-dev-pr.yml | 3 - CONTRIBUTING.md | 15 +- README.md | 10 +- cli/README.md | 22 +- config/base/derive/src/documented.rs | 263 ------- config/base/derive/src/lib.rs | 12 +- config/base/derive/src/proxy.rs | 4 +- config/base/src/lib.rs | 147 +--- config/base/tests/simple.rs | 206 ------ config/src/block_sync.rs | 4 +- config/src/client.rs | 6 +- config/src/genesis.rs | 4 +- config/src/iroha.rs | 4 +- config/src/kura.rs | 4 +- config/src/live_query_store.rs | 4 +- config/src/logger.rs | 4 +- config/src/network.rs | 4 +- config/src/queue.rs | 4 +- config/src/snapshot.rs | 4 +- config/src/sumeragi.rs | 4 +- config/src/telemetry.rs | 4 +- config/src/torii.rs | 4 +- config/src/wasm.rs | 4 +- config/src/wsv.rs | 4 +- docs/README.md | 3 +- docs/source/references/config.md | 835 ---------------------- hooks/pre-commit.sample | 3 +- scripts/tests/consistency.sh | 5 - tools/kagami/src/docs.rs | 130 ---- tools/kagami/src/main.rs | 4 - 31 files changed, 51 insertions(+), 1676 deletions(-) delete mode 100644 config/base/derive/src/documented.rs delete mode 100644 config/base/tests/simple.rs delete mode 100644 docs/source/references/config.md delete mode 100644 tools/kagami/src/docs.rs diff --git a/.github/workflows/iroha2-dev-pr-label.yml b/.github/workflows/iroha2-dev-pr-label.yml index cbf581bd692..4eb18953a92 100644 --- a/.github/workflows/iroha2-dev-pr-label.yml +++ b/.github/workflows/iroha2-dev-pr-label.yml @@ -5,7 +5,6 @@ on: branches: [iroha-dev] paths: - 'docs/source/references/schema.json' - - 'docs/source/references/config.md' jobs: api-changes: @@ -30,7 +29,7 @@ jobs: continue-on-error: true id: config_label - uses: actions-ecosystem/action-add-labels@v1 - if: contains(steps.config_label.outputs.added_modified, 'docs/source/references/config.md') + if: contains(steps.config_label.outputs.added_modified) with: github_token: ${{ secrets.github_token }} labels: | diff --git a/.github/workflows/iroha2-dev-pr.yml b/.github/workflows/iroha2-dev-pr.yml index b78ede1a64a..9ba479df65d 100644 --- a/.github/workflows/iroha2-dev-pr.yml +++ b/.github/workflows/iroha2-dev-pr.yml @@ -25,9 +25,6 @@ jobs: steps: - uses: actions/checkout@v4 - uses: Swatinem/rust-cache@v2 - - name: Check config.md - if: always() - run: ./scripts/tests/consistency.sh docs - name: Check genesis.json if: always() run: ./scripts/tests/consistency.sh genesis diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 4869dd4e4c0..74bafed0f92 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,14 +18,13 @@ New to our project? [Make your first contribution](#your-first-code-contribution ### TL;DR -* Find [ZenHub](https://app.zenhub.com/workspaces/iroha-v2-60ddb820813b9100181fc060/board?repos=181739240). -* Fork [Iroha](https://github.com/hyperledger/iroha/tree/iroha2-dev). -* Fix your issue of choice. -* Ensure you follow our [style guides](#style-guides) for code and documentation. -* Write [tests](https://doc.rust-lang.org/cargo/commands/cargo-test.html). Ensure they all pass (`cargo test`). -* Fix [`clippy`](https://lib.rs/crates/cargo-lints) warnings: `cargo lints clippy --workspace --benches --tests --examples --all-features`. 
-* Format code `cargo +nightly fmt --all` and generate docs `cargo run --bin kagami -- docs >"docs/source/references/config.md" && git add "docs/source/references/config.md"`. -* With the `upstream` set to track [Hyperledger Iroha repository](https://github.com/hyperledger/iroha), `git pull -r upstream iroha2-dev`, `git commit -s`, `git push `, and [create a pull request](https://github.com/hyperledger/iroha/compare) to the `iroha2-dev` branch. Ensure the PR has the `[type] #: Description` [title](#pull-request-titles). +- Find [ZenHub](https://app.zenhub.com/workspaces/iroha-v2-60ddb820813b9100181fc060/board?repos=181739240). +- Fork [Iroha](https://github.com/hyperledger/iroha/tree/iroha2-dev). +- Fix your issue of choice. +- Ensure you follow our [style guides](#style-guides) for code and documentation. +- Write [tests](https://doc.rust-lang.org/cargo/commands/cargo-test.html). Ensure they all pass (`cargo test --workspace`). +- Perform pre-commit routine like formatting & artifacts regeneration (see [`pre-commit.sample`](./hooks/pre-commit.sample)) +- With the `upstream` set to track [Hyperledger Iroha repository](https://github.com/hyperledger/iroha), `git pull -r upstream iroha2-dev`, `git commit -s`, `git push `, and [create a pull request](https://github.com/hyperledger/iroha/compare) to the `iroha2-dev` branch. Ensure the PR has the `[type] #: Description` [title](#pull-request-titles). ### Reporting Bugs diff --git a/README.md b/README.md index 5877ff5f40d..d93882849bc 100644 --- a/README.md +++ b/README.md @@ -157,9 +157,7 @@ A brief overview on how to configure and maintain an Iroha instance: ### Configuration -You can provide configuration parameters either as a `config.json` or using environment variables. Refer to the [detailed list](./docs/source/references/config.md) of all available configuration parameters. - -Configuration example you may use as a reference point: [cli/src/samples.rs](./cli/src/samples.rs) +**Note:** this section is under development. You can track it in the [issue](https://github.com/hyperledger/iroha-2-docs/issues/392). ### Endpoints @@ -169,7 +167,7 @@ For a list of all endpoints, available operations, and ways to customize them wi By default, Iroha provides logs in a human-readable format and prints them out to `stdout`. -The logging level can be changed either via the [`logger.level` configuration parameter](./docs/source/references/config.md#loggerlevel) or at run-time using the `configuration` endpoint. +The logging level can be changed either via the `logger.level` configuration parameter or at run-time using the `configuration` endpoint.
<details><summary>Example: changing log level</summary>

@@ -182,7 +180,7 @@ curl -X POST \
```

</details>
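The same `logger.level` value can also be set when the peer starts rather than at run time. A minimal sketch, assuming the `LOG_LEVEL` environment variable that the removed configuration reference documented for `logger.level` and the `./iroha` invocation shown elsewhere in this patch:

```bash
# Start the peer with debug-level logging from boot.
# LOG_LEVEL is the environment variable the (now removed) configuration
# reference lists for logger.level; ./iroha is the peer binary.
LOG_LEVEL=DEBUG ./iroha
```

Unlike the `configuration` endpoint shown above, a value passed this way only takes effect when the peer is restarted.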
-The log format might be configured via the [`logger.format` configuration parameter](./docs/source/references/config.md#loggerformat). Possible values are: `full` (default), `compact`, `pretty`, and `json`. +The log format might be configured via the `logger.format` configuration parameter. Possible values are: `full` (default), `compact`, `pretty`, and `json`. Output goes to `/dev/stdout`. Piping to files or [log rotation](https://www.commandlinux.com/man-page/man5/logrotate.conf.5.html) is the responsibility of the peer administrator. @@ -218,7 +216,7 @@ We encourage you to check out our [Iroha 2 Tutorial](https://hyperledger.github. * [Glossary](https://hyperledger.github.io/iroha-2-docs/guide/glossary) * [Iroha Special Instructions](https://hyperledger.github.io/iroha-2-docs/guide/blockchain/instructions) * [API Reference](https://hyperledger.github.io/iroha-2-docs/api/torii-endpoints) -* [Configuration Reference](./docs/source/references/config.md) + * [Iroha 2 Whitepaper](./docs/source/iroha_2_whitepaper.md) Iroha SDKs: diff --git a/cli/README.md b/cli/README.md index 9da57cc943e..c5912212253 100644 --- a/cli/README.md +++ b/cli/README.md @@ -74,27 +74,7 @@ Refer to [generating key pairs with `kagami`](../tools/kagami#crypto) for more d ### Configuration file -You must provide a configuration file to run the Iroha peer binary. Iroha will not run with defaults if the configuration file is not available. - -The Iroha binary looks for either a `config.json` file in the current directory or a JSON file in `IROHA2_CONFIG_PATH`. If the configuration file is not valid, the Iroha peer binary exits and does nothing. If neither of these files is provided, all the fields from the default `config.json` should be specified as environment variables. Note that environment variables override the variables in their respective fields provided via `config.json`. - -The environment variables replacing `config.json` should be passed as JSON strings, meaning that any inner quotes should be properly escaped in the command line as shown in the example below. - -
Expand to see the example - -``` bash -IROHA_TORII="{\"P2P_ADDR\": \"127.0.0.1:1339\", \"API_URL\": \"127.0.0.1:8080\"}" IROHA_SUMERAGI="{\"TRUSTED_PEERS\": [{\"address\": \"127.0.0.1:1337\",\"public_key\": \"ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B\"},{\"address\": \"127.0.0.1:1338\",\"public_key\": \"ed0120CC25624D62896D3A0BFD8940F928DC2ABF27CC57CEFEB442AA96D9081AAE58A1\"},{\"address\": \"127.0.0.1:1339\",\"public_key\": \"ed0120FACA9E8AA83225CB4D16D67F27DD4F93FC30FFA11ADC1F5C88FD5495ECC91020\"},{\"address\": \"127.0.0.1:1340\",\"public_key\": \"ed01208E351A70B6A603ED285D666B8D689B680865913BA03CE29FB7D13A166C4E7F1F\"}]}" IROHA_KURA="{\"INIT_MODE\": \"strict\",\"BLOCK_STORE_PATH\": \"./storage\"}" IROHA_BLOCK_SYNC="{\"GOSSIP_PERIOD_MS\": 10000,\"BATCH_SIZE\": 2}" IROHA_PUBLIC_KEY="ed01201C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B" IROHA_PRIVATE_KEY="{\"digest_function\": \"ed25519\",\"payload\": \"282ED9F3CF92811C3818DBC4AE594ED59DC1A2F78E4241E31924E101D6B1FB831C61FAF8FE94E253B93114240394F79A607B7FA55F9E5A41EBEC74B88055768B\"}" IROHA_GENESIS="{\"ACCOUNT_PUBLIC_KEY\": \"ed01204CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF\",\"ACCOUNT_PRIVATE_KEY\": {\"digest_function\": \"ed25519\",\"payload\": \"D748E18CE60CB30DEA3E73C9019B7AF45A8D465E3D71BCC9A5EF99A008205E534CFFD0EE429B1BDD36B3910EC570852B8BB63F18750341772FB46BC856C5CAAF\"}}" ./iroha -``` - -
- -:grey_exclamation: We do not recommend using environment variables for configuration outside docker-compose and Kubernetes deployments. Please change the values in the configuration file instead. That would also help us debug the problems that you might be having. - -The [configuration options reference](../docs/source/references/config.md) provides detailed explanations of each configuration variable. You may use the [sample configuration file](../configs/peer/config.json) for quick testing. - -One of the peers on your network must be provided with the genesis block, which is either `IROHA2_GENESIS_PATH` or `genesis.json` in the working directory. -Check [configuration options](https://github.com/hyperledger/iroha/blob/iroha2-dev/docs/source/references/config.md#genesis) for details. -Learn more about the genesis block in [our tutorial](https://hyperledger.github.io/iroha-2-docs/guide/configure/genesis.html). +**Note:** this section is under development. You can track it in the [issue](https://github.com/hyperledger/iroha-2-docs/issues/392). ## Deployment diff --git a/config/base/derive/src/documented.rs b/config/base/derive/src/documented.rs deleted file mode 100644 index 604399061fa..00000000000 --- a/config/base/derive/src/documented.rs +++ /dev/null @@ -1,263 +0,0 @@ -use proc_macro::TokenStream; -use proc_macro2::Span; -use quote::quote; -use syn::{parse_quote, Lit, LitStr, Meta, Path}; - -use super::utils::{get_inner_type, StructWithFields}; - -pub fn impl_documented(ast: &StructWithFields) -> TokenStream { - let name = &ast.ident; - let docs = gen_docs(ast); - - let get_docs = impl_get_docs(docs.clone(), ast); - let get_inner_docs = impl_get_inner_docs(docs.clone(), ast); - let get_doc_recursive = impl_get_doc_recursive(docs, ast); - - let get_recursive = impl_get_recursive(ast); - - let out = quote! { - impl ::iroha_config_base::proxy::Documented for #name { - type Error = ::iroha_config_base::derive::Error; - - #get_recursive - #get_doc_recursive - #get_docs - #get_inner_docs - } - }; - out.into() -} - -fn impl_get_doc_recursive(docs: Vec, ast: &StructWithFields) -> proc_macro2::TokenStream { - if ast.fields.is_empty() { - return quote! { - fn get_doc_recursive<'a>( - inner_field: impl AsRef<[&'a str]>, - ) -> core::result::Result, ::iroha_config_base::derive::Error> - { - Err(::iroha_config_base::derive::Error::UnknownField( - ::iroha_config_base::derive::Field( - inner_field.as_ref().iter().map(ToString::to_string).collect() - ))) - } - }; - } - - let variants = ast.fields - .iter() - .zip(docs) - .map(|(field, documentation)| { - let ty = &field.ty; - let ident = &field.ident; - let documented_trait: Path = parse_quote! { iroha_config_base::proxy::Documented }; - if field.has_inner && field.has_option { - let inner_ty = get_inner_type("Option", &field.ty); - quote! { - [stringify!(#ident)] => { - let curr_doc = #documentation; - let inner_docs = <#inner_ty as #documented_trait>::get_inner_docs(); - let total_docs = format!("{}\n\nHas following fields:\n\n{}\n", curr_doc, inner_docs); - Some(total_docs) - }, - [stringify!(#ident), rest @ ..] => <#inner_ty as #documented_trait>::get_doc_recursive(rest)?, - } - } else if field.has_inner { - quote! { - [stringify!(#ident)] => { - let curr_doc = #documentation; - let inner_docs = <#ty as #documented_trait>::get_inner_docs(); - let total_docs = format!("{}\n\nHas following fields:\n\n{}\n", curr_doc, inner_docs); - Some(total_docs) - }, - [stringify!(#ident), rest @ ..] 
=> <#ty as #documented_trait>::get_doc_recursive(rest)?, - } - } else { - quote! { [stringify!(#ident)] => Some(#documentation.to_owned()), } - } - }); - - quote! { - fn get_doc_recursive<'a>( - inner_field: impl AsRef<[&'a str]>, - ) -> core::result::Result, ::iroha_config_base::derive::Error> - { - let inner_field = inner_field.as_ref(); - let doc = match inner_field { - #(#variants)* - field => return Err(::iroha_config_base::derive::Error::UnknownField( - ::iroha_config_base::derive::Field( - field.iter().map(ToString::to_string).collect() - ))), - }; - Ok(doc) - } - } -} - -fn impl_get_inner_docs(docs: Vec, ast: &StructWithFields) -> proc_macro2::TokenStream { - let inserts = ast.fields.iter().zip(docs).map(|(field, documentation)| { - let ty = &field.ty; - let ident = &field.ident; - let documented_trait: Path = parse_quote! { ::iroha_config_base::proxy::Documented }; - let doc = if field.has_inner && field.has_option { - let inner_ty = get_inner_type("Option", &field.ty); - quote! { - <#inner_ty as #documented_trait>::get_inner_docs().as_str() - } - } else if field.has_inner { - quote! { <#ty as #documented_trait>::get_inner_docs().as_str() } - } else { - quote! { #documentation.into() } - }; - - quote! { - inner_docs.push_str(stringify!(#ident)); - inner_docs.push_str(": "); - inner_docs.push_str(#doc); - inner_docs.push_str("\n\n"); - } - }); - - quote! { - fn get_inner_docs() -> String { - let mut inner_docs = String::new(); - #(#inserts)* - inner_docs - } - } -} - -fn impl_get_docs(docs: Vec, ast: &StructWithFields) -> proc_macro2::TokenStream { - let inserts = ast.fields.iter().zip(docs).map(|(field, documentation)| { - let ident = &field.ident; - let ty = &field.ty; - let documented_trait: Path = parse_quote! { iroha_config_base::proxy::Documented }; - let doc = if field.has_inner && field.has_option { - let inner_ty = get_inner_type("Option", &field.ty); - quote! { <#inner_ty as #documented_trait>::get_docs().into() } - } else if field.has_inner { - quote! { <#ty as #documented_trait>::get_docs().into() } - } else { - quote! { #documentation.into() } - }; - - quote! { map.insert(stringify!(#ident).to_owned(), #doc); } - }); - - quote! { - fn get_docs() -> serde_json::Value { - let mut map = serde_json::Map::new(); - #(#inserts)* - map.into() - } - } -} - -fn impl_get_recursive(ast: &StructWithFields) -> proc_macro2::TokenStream { - if ast.fields.is_empty() { - return quote! { - fn get_recursive<'a, T>( - &self, - inner_field: T, - ) -> ::iroha_config_base::BoxedFuture<'a, core::result::Result> - where - T: AsRef<[&'a str]> + Send + 'a, - { - Err(::iroha_config_base::derive::Error::UnknownField( - ::iroha_config_base::derive::Field( - inner_field.as_ref().iter().map(ToString::to_string).collect() - ))) - } - }; - } - - let variants = ast.fields - .iter() - .map(|field | { - let ident = &field.ident; - let l_value = &field.lvalue_read; - let inner_thing2 = if field.has_inner && field.has_option { - let inner_ty = get_inner_type("Option", &field.ty); - let documented_trait: Path = parse_quote! { iroha_config_base::proxy::Documented }; - quote! { - [stringify!(#ident), rest @ ..] => { - <#inner_ty as #documented_trait>::get_recursive(#l_value.as_ref().expect("Should be instantiated"), rest)? - }, - } - } else if field.has_inner { - quote! { - [stringify!(#ident), rest @ ..] => { - #l_value.get_recursive(rest)? - }, - } - } else { - quote! {} - }; - quote! 
{ - [stringify!(#ident)] => { - serde_json::to_value(&#l_value) - .map_err( - |error| - ::iroha_config_base::derive::Error::field_deserialization_from_json( - stringify!(#ident), - &error - ) - )? - } - #inner_thing2 - } - }); - - quote! { - fn get_recursive<'a, T>( - &self, - inner_field: T, - ) -> core::result::Result - where - T: AsRef<[&'a str]> + Send + 'a, - { - let inner_field = inner_field.as_ref(); - let value = match inner_field { - #(#variants)* - field => return Err(::iroha_config_base::derive::Error::UnknownField( - ::iroha_config_base::derive::Field( - field.iter().map(ToString::to_string).collect() - ))), - }; - Ok(value) - } - } -} - -/// Generate documentation for all fields based on their type and already existing documentation -pub fn gen_docs(ast: &StructWithFields) -> Vec { - ast.fields - .iter() - .map(|field| { - let field_ty = &field.ty; - let env = &field.env_str; - let real_doc = field - .attrs - .iter() - .filter_map(|attr| attr.parse_meta().ok()) - .find_map(|metadata| { - if let Meta::NameValue(meta) = metadata { - if meta.path.is_ident("doc") { - if let Lit::Str(s) = meta.lit { - return Some(s); - } - } - } - None - }); - let real_doc = real_doc.map(|doc| doc.value() + "\n\n").unwrap_or_default(); - let docs = format!( - "{}Has type `{}`[^1]. Can be configured via environment variable `{}`", - real_doc, - quote! { #field_ty }.to_string().replace(' ', ""), - env - ); - LitStr::new(&docs, Span::mixed_site()) - }) - .collect::>() -} diff --git a/config/base/derive/src/lib.rs b/config/base/derive/src/lib.rs index f86d6af896b..0cd24e4e345 100644 --- a/config/base/derive/src/lib.rs +++ b/config/base/derive/src/lib.rs @@ -2,13 +2,12 @@ use proc_macro::TokenStream; -pub(crate) mod documented; pub(crate) mod proxy; pub(crate) mod utils; pub(crate) mod view; /// Derive for config loading. More details in `iroha_config_base` reexport -#[proc_macro_derive(Override)] +#[proc_macro_derive(Override, attributes(config))] pub fn override_derive(input: TokenStream) -> TokenStream { let ast = syn::parse_macro_input!(input as utils::StructWithFields); proxy::impl_override(&ast) @@ -37,19 +36,12 @@ pub fn load_from_disk_derive(input: TokenStream) -> TokenStream { } /// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_derive(Proxy)] +#[proc_macro_derive(Proxy, attributes(config))] pub fn proxy_derive(input: TokenStream) -> TokenStream { let ast = syn::parse_macro_input!(input as utils::StructWithFields); proxy::impl_proxy(ast) } -/// Derive for config querying and setting. More details in `iroha_config_base` reexport -#[proc_macro_derive(Documented, attributes(config))] -pub fn documented_derive(input: TokenStream) -> TokenStream { - let ast = syn::parse_macro_input!(input as utils::StructWithFields); - documented::impl_documented(&ast) -} - /// Generate view for given struct and convert from type to its view. /// More details in `iroha_config_base` reexport. #[proc_macro] diff --git a/config/base/derive/src/proxy.rs b/config/base/derive/src/proxy.rs index 7a1e170f2e5..dafef4c6145 100644 --- a/config/base/derive/src/proxy.rs +++ b/config/base/derive/src/proxy.rs @@ -14,7 +14,6 @@ pub fn impl_proxy(ast: StructWithFields) -> TokenStream { let disk_derive = quote! { ::iroha_config_base::derive::LoadFromDisk }; let builder_derive = quote! { ::iroha_config_base::derive::Builder }; let override_derive = quote! { ::iroha_config_base::derive::Override }; - let documented_derive = quote! { ::iroha_config_base::derive::Documented }; quote! 
{ /// Proxy configuration structure to be used as an intermediate /// for configuration loading. Both loading from disk and @@ -24,8 +23,7 @@ pub fn impl_proxy(ast: StructWithFields) -> TokenStream { #builder_derive, #loadenv_derive, #disk_derive, - #override_derive, - #documented_derive + #override_derive )] #[builder(parent = #parent_ty)] #proxy_struct diff --git a/config/base/src/lib.rs b/config/base/src/lib.rs index d8f40d64c21..7ea61d35ddb 100644 --- a/config/base/src/lib.rs +++ b/config/base/src/lib.rs @@ -2,7 +2,6 @@ use std::{fmt::Debug, path::Path}; use serde::{de::DeserializeOwned, Deserialize, Deserializer, Serialize}; -use serde_json::Value; pub mod derive { //! Derives for configuration entities @@ -100,34 +99,6 @@ pub mod derive { /// ``` pub use iroha_config_derive::Builder; /// Derive macro for implementing the trait - /// [`iroha_config::base::proxy::Documented`](`crate::proxy::Documented`) - /// for config structures. - /// - /// Even though this macro doesn't own any attributes, as of now - /// it relies on the `#[config]` attribute defined by the - /// [`iroha_config::base::derive::Override`](`crate::derive::Override`) - /// macro. As such, `#[config(env_prefix = ...)]` is required for - /// generating documentation, and `#[config(inner)]` for getting - /// inner fields recursively. - /// - /// # Examples - /// - /// ```rust - /// use iroha_config_base::derive::Documented; - /// use iroha_config_base::proxy::Documented as _; - /// - /// #[derive(serde::Deserialize, serde::Serialize, Documented)] - /// struct Outer { #[config(inner)] inner: Inner } - /// - /// #[derive(serde::Deserialize, serde::Serialize, Documented)] - /// struct Inner { b: String } - /// - /// let outer = Outer { inner: Inner { b: "a".to_owned() }}; - /// - /// assert_eq!(outer.get_recursive(["inner", "b"]).unwrap(), "a"); - /// ``` - pub use iroha_config_derive::Documented; - /// Derive macro for implementing the trait /// [`iroha_config::base::proxy::LoadFromDisk`](`crate::proxy::LoadFromDisk`) /// trait for config structures. /// @@ -272,38 +243,10 @@ pub mod derive { /// (via [`iroha_config_base::proxy::Builder`](`crate::proxy::Builder`) /// trait) and ways to combine two proxies together (via /// [`iroha_config_base::proxy::Override`](`crate::proxy::Override`)). 
- /// - /// # Examples - /// - /// ```rust - /// use iroha_config_base::derive::{Documented, Proxy}; - /// - /// // Need `Documented` here as it owns the `#[config]` attribute - /// #[derive(serde::Deserialize, serde::Serialize, Documented, Proxy)] - /// struct Outer { #[config(inner)] inner: Inner } - /// - /// #[derive(serde::Deserialize, serde::Serialize, Documented, Proxy)] - /// struct Inner { b: String } - /// - /// // Will generate something like this - /// // #[derive(Debug, Clone, serde::Deserialize, serde::Serialize, - /// // Builder, Override, Documented, LoadFromEnv, LoadFromDisk)] - /// // #[builder(parent = Outer)] - /// // struct OuterProxy { #[config(inner)] inner: Option } - /// - /// // #[derive(Debug, PartialEq, serde::Deserialize, serde::Serialize, - /// // Builder, Override, Documented, LoadFromEnv, LoadFromDisk)] - /// // struct InnerProxy { b: Option } - /// ``` pub use iroha_config_derive::Proxy; use serde::Deserialize; use thiserror::Error; - // TODO: use VERGEN to point to LTS reference on LTS branch - /// Reference to the current Dev branch configuration - pub static CONFIG_REFERENCE: &str = - "https://github.com/hyperledger/iroha/blob/iroha2-dev/docs/source/references/config.md"; - /// Represents a path to a nested field in a config structure #[derive(Debug, Deserialize)] #[serde(transparent)] @@ -322,14 +265,9 @@ pub mod derive { #[ignore_extra_doc_attributes] #[allow(clippy::enum_variant_names)] pub enum Error { - /// Got unknown field: `{0}` - /// - /// Used in [`Documented`] trait for wrong query errors - UnknownField(Field), - /// Failed to deserialize the field `{field}` /// - /// Used in [`Documented`] and [`super::proxy::LoadFromEnv`] trait for deserialization + /// Used in [`super::proxy::LoadFromEnv`] trait for deserialization /// errors #[serde(skip)] FieldDeserialization { @@ -408,14 +346,6 @@ pub mod derive { } } } - - #[test] - fn unknown_field_fmt() { - assert_eq!( - Error::UnknownField(Field(vec!["a".into(), "b".into()])).to_string(), - "Got unknown field: `a.b`" - ); - } } pub mod view { @@ -450,81 +380,6 @@ pub mod proxy { use super::*; - /// Trait for dynamic and asynchronous configuration via - /// maintenance endpoint for Rust structures - pub trait Documented: Serialize + DeserializeOwned { - /// Error type returned by methods of this trait - type Error; - - /// Return documentation for all fields in a form of a JSON object - fn get_docs() -> Value; - - /// Get inner documentation for non-leaf fields - fn get_inner_docs() -> String; - - /// Return the JSON value of a given field - /// - /// # Errors - /// Fails if field was unknown - #[inline] - fn get(&self, field: &'_ str) -> Result { - self.get_recursive([field]) - } - - /// Get documentation of a given field - /// - /// # Errors - /// Fails if field was unknown - #[inline] - fn get_doc(field: &str) -> Result, Self::Error> { - Self::get_doc_recursive([field]) - } - - /// Return the JSON value of a given inner field of arbitrary - /// inner depth - /// - /// # Errors - /// Fails if field was unknown - fn get_recursive<'tl, T>(&self, inner_field: T) -> Result - where - T: AsRef<[&'tl str]> + Send + 'tl; - - #[allow(single_use_lifetimes)] // Unstable - /// Get documentation of a given inner field of arbitrary depth - /// - /// # Errors - /// Fails if field was unknown - fn get_doc_recursive<'tl>( - field: impl AsRef<[&'tl str]>, - ) -> Result, Self::Error>; - } - - impl Documented for Box { - type Error = T::Error; - - fn get_docs() -> Value { - T::get_docs() - } - - fn get_inner_docs() -> 
String { - T::get_inner_docs() - } - - fn get_recursive<'tl, U>(&self, inner_field: U) -> Result - where - U: AsRef<[&'tl str]> + Send + 'tl, - { - T::get_recursive(self, inner_field) - } - - #[allow(single_use_lifetimes)] // False-positive - fn get_doc_recursive<'tl>( - field: impl AsRef<[&'tl str]>, - ) -> Result, Self::Error> { - T::get_doc_recursive(field) - } - } - /// Trait for combining two configuration instances pub trait Override: Serialize + DeserializeOwned + Sized { /// If any of the fields in `other` are filled, they diff --git a/config/base/tests/simple.rs b/config/base/tests/simple.rs deleted file mode 100644 index 4de93ca3632..00000000000 --- a/config/base/tests/simple.rs +++ /dev/null @@ -1,206 +0,0 @@ -use std::{collections::HashMap, env::VarError, ffi::OsStr}; - -use iroha_config_base::{ - derive::{Documented, LoadFromEnv, Override}, - proxy::{Documented as _, FetchEnv, LoadFromEnv as _, Override as _}, -}; -use serde::{Deserialize, Serialize}; - -#[derive(Clone, Debug, Deserialize, Serialize, LoadFromEnv, Override)] -#[config(env_prefix = "CONF_")] -struct ConfigurationProxy { - /// Inner structure - #[config(inner)] - inner: Option, - #[config(serde_as_str)] - pub string_wrapper: Option, - pub string: Option, - pub data: Option, -} - -#[derive(Clone, Debug, Deserialize, Serialize, Documented)] -#[config(env_prefix = "CONF_")] -struct Configuration { - /// Inner structure - #[config(inner)] - inner: InnerConfiguration, - #[config(serde_as_str)] - pub string_wrapper: StringWrapper, - pub string: String, - pub data: Data, -} - -impl ConfigurationProxy { - fn new_with_placeholders() -> Self { - Self { - inner: Some(InnerConfigurationProxy { - a: Some("string".to_owned()), - b: Some(42), - }), - string_wrapper: Some(StringWrapper("string".to_owned())), - string: Some("cool string".to_owned()), - data: Some(Data { - key: "key".to_owned(), - value: 34, - }), - } - } - - fn new_with_none() -> Self { - Self { - inner: None, - string_wrapper: None, - string: None, - data: None, - } - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, LoadFromEnv, Override)] -#[config(env_prefix = "CONF_INNER_")] -struct InnerConfigurationProxy { - pub a: Option, - // From expression - /// Docs from b - pub b: Option, -} - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize, Documented)] -#[config(env_prefix = "CONF_INNER_")] -struct InnerConfiguration { - pub a: String, - // From expression - /// Docs from b - pub b: i32, -} -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -struct Data { - key: String, - value: u64, -} - -#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)] -struct StringWrapper(String); - -#[test] -fn test_docs() { - assert_eq!( - Configuration::get_doc_recursive(["inner", "b"]).unwrap(), - Some(" Docs from b\n\nHas type `i32`[^1]. Can be configured via environment variable `CONF_INNER_B`".to_owned()) - ); - assert_eq!( - Configuration::get_doc_recursive(["inner", "a"]).unwrap(), - Some( - "Has type `String`[^1]. Can be configured via environment variable `CONF_INNER_A`" - .to_owned() - ) - ); - assert_eq!( - Configuration::get_doc_recursive(["inner"]).unwrap(), - Some(" Inner structure\n\nHas type `InnerConfiguration`[^1]. Can be configured via environment variable `CONF_INNER`\n\nHas following fields:\n\na: Has type `String`[^1]. Can be configured via environment variable `CONF_INNER_A`\n\nb: Docs from b\n\nHas type `i32`[^1]. 
Can be configured via environment variable `CONF_INNER_B`\n\n\n".to_owned()) - ); -} - -struct TestEnv { - map: HashMap, -} - -impl TestEnv { - fn new() -> Self { - Self { - map: HashMap::new(), - } - } - - fn set_var(&mut self, key: impl AsRef, value: impl AsRef) { - self.map - .insert(key.as_ref().to_owned(), value.as_ref().to_owned()); - } - - fn remove_var(&mut self, key: impl AsRef) { - self.map.remove(key.as_ref()); - } -} - -impl FetchEnv for TestEnv { - fn fetch>(&self, key: K) -> Result { - self.map - .get( - key.as_ref() - .to_str() - .ok_or_else(|| VarError::NotUnicode(key.as_ref().to_owned()))?, - ) - .ok_or(VarError::NotPresent) - .map(Clone::clone) - } -} - -fn test_env_factory() -> TestEnv { - let string_wrapper_json = "string"; - let string = "cool string"; - let data_json = r#"{"key": "key", "value": 34}"#; - let inner_json = r#"{"a": "", "b": 0}"#; - let mut env = TestEnv::new(); - env.set_var("CONF_STRING_WRAPPER", string_wrapper_json); - env.set_var("CONF_STRING", string); - env.set_var("CONF_DATA", data_json); - env.set_var("CONF_OPTIONAL_STRING_WRAPPER", string_wrapper_json); - env.set_var("CONF_OPTIONAL_STRING", string); - env.set_var("CONF_OPTIONAL_DATA", data_json); - env.set_var("CONF_OPTIONAL_INNER", inner_json); - env.set_var("CONF_INNER_A", "string"); - env.set_var("CONF_INNER_B", "42"); - env -} - -#[test] -fn test_proxy_load_from_env() { - let config = ConfigurationProxy::new_with_placeholders(); - let env_config = ConfigurationProxy::from_env(&test_env_factory()).expect("valid env"); - assert_eq!(&env_config.data, &config.data); - assert_eq!(&env_config.string_wrapper, &config.string_wrapper); - assert_eq!(&env_config.string, &config.string); - assert_eq!(&env_config.inner, &config.inner); -} - -#[test] -fn test_can_load_inner_without_the_wrapping_config() { - let mut env = test_env_factory(); - env.remove_var("CONF_OPTIONAL_INNER"); - let config = ConfigurationProxy::new_with_placeholders(); - let env_config = ConfigurationProxy::from_env(&env).expect("valid env"); - assert_eq!(&env_config.inner, &config.inner); -} - -#[test] -fn test_proxy_combine_does_not_overload_with_none() { - let config = ConfigurationProxy::new_with_none(); - let env_config = ConfigurationProxy::from_env(&test_env_factory()).expect("valid env"); - let combine_config = env_config.clone().override_with(config); - assert_eq!(&env_config.data, &combine_config.data); -} - -#[test] -fn configuration_proxy_from_env_returns_err_on_parsing_error() { - #[derive(LoadFromEnv, Debug)] - #[config(env_prefix = "")] - struct Target { - #[allow(dead_code)] - foo: Option, - } - - struct Env; - - impl FetchEnv for Env { - fn fetch>(&self, key: K) -> Result { - match key.as_ref().to_str().unwrap() { - "FOO" => Ok("not u64 for sure".to_owned()), - _ => Err(VarError::NotPresent), - } - } - } - - let err = Target::from_env(&Env).expect_err("Must not be parsed"); - let err = eyre::Report::new(err); - assert_eq!(format!("{err:?}"), "Failed to deserialize the field `FOO`\n\nCaused by:\n JSON5: --> 1:1\n |\n 1 | not u64 for sure\n | ^---\n |\n = expected array, boolean, null, number, object, or string\n\nLocation:\n config/base/tests/simple.rs:204:15"); -} diff --git a/config/src/block_sync.rs b/config/src/block_sync.rs index 6802fcce9c9..dd927df3ece 100644 --- a/config/src/block_sync.rs +++ b/config/src/block_sync.rs @@ -1,5 +1,5 @@ //! Module for `BlockSynchronizer`-related configuration and structs. 
-use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; const DEFAULT_BLOCK_BATCH_SIZE: u32 = 4; @@ -7,7 +7,7 @@ const DEFAULT_GOSSIP_PERIOD_MS: u64 = 10000; const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; /// Configuration for `BlockSynchronizer`. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "BLOCK_SYNC_")] pub struct Configuration { diff --git a/config/src/client.rs b/config/src/client.rs index 312bb2a1737..a9238879cac 100644 --- a/config/src/client.rs +++ b/config/src/client.rs @@ -4,7 +4,7 @@ use std::num::NonZeroU64; use derive_more::Display; use eyre::{Result, WrapErr}; -use iroha_config_base::derive::{Documented, Error as ConfigError, Proxy}; +use iroha_config_base::derive::{Error as ConfigError, Proxy}; use iroha_crypto::prelude::*; use iroha_data_model::{prelude::*, transaction::TransactionLimits}; use iroha_primitives::small::SmallStr; @@ -56,7 +56,7 @@ impl<'de> Deserialize<'de> for WebLogin { } /// Basic Authentication credentials -#[derive(Clone, Deserialize, Serialize, Debug, Documented, PartialEq, Eq)] +#[derive(Clone, Deserialize, Serialize, Debug, PartialEq, Eq)] pub struct BasicAuth { /// Login for Basic Authentication pub web_login: WebLogin, @@ -65,7 +65,7 @@ pub struct BasicAuth { } /// `Configuration` provides an ability to define client parameters such as `TORII_URL`. -#[derive(Debug, Clone, Deserialize, Serialize, Proxy, Documented, PartialEq, Eq)] +#[derive(Debug, Clone, Deserialize, Serialize, Proxy, PartialEq, Eq)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_")] pub struct Configuration { diff --git a/config/src/genesis.rs b/config/src/genesis.rs index fe51c5e33a3..2bb9e8d892b 100644 --- a/config/src/genesis.rs +++ b/config/src/genesis.rs @@ -1,12 +1,12 @@ //! Module with genesis configuration logic. -use iroha_config_base::derive::{view, Documented, Proxy}; +use iroha_config_base::derive::{view, Proxy}; use iroha_crypto::{PrivateKey, PublicKey}; use serde::{Deserialize, Serialize}; // Generate `ConfigurationView` without the private key view! { /// Configuration of the genesis block and the process of its submission. - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] + #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_GENESIS_")] pub struct Configuration { diff --git a/config/src/iroha.rs b/config/src/iroha.rs index 0ade3128196..6fc69f5069b 100644 --- a/config/src/iroha.rs +++ b/config/src/iroha.rs @@ -1,7 +1,7 @@ //! This module contains [`struct@Configuration`] structure and related implementation. use std::fmt::Debug; -use iroha_config_base::derive::{view, Documented, Error as ConfigError, Proxy}; +use iroha_config_base::derive::{view, Error as ConfigError, Proxy}; use iroha_crypto::prelude::*; use serde::{Deserialize, Serialize}; @@ -10,7 +10,7 @@ use super::*; // Generate `ConfigurationView` without the private key view! 
{ /// Configuration parameters for a peer - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy, Documented)] + #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_")] pub struct Configuration { diff --git a/config/src/kura.rs b/config/src/kura.rs index 03c0cb4fb74..5ce29c4ce95 100644 --- a/config/src/kura.rs +++ b/config/src/kura.rs @@ -1,13 +1,13 @@ //! Module for kura-related configuration and structs use eyre::Result; -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; const DEFAULT_BLOCK_STORE_PATH: &str = "./storage"; /// `Kura` configuration. -#[derive(Clone, Deserialize, Serialize, Debug, Documented, Proxy, PartialEq, Eq)] +#[derive(Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "KURA_")] pub struct Configuration { diff --git a/config/src/live_query_store.rs b/config/src/live_query_store.rs index 79382fee2ca..de8b2a31ec2 100644 --- a/config/src/live_query_store.rs +++ b/config/src/live_query_store.rs @@ -2,7 +2,7 @@ use std::num::NonZeroU64; -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; /// Default max time a query can remain in the store unaccessed @@ -10,7 +10,7 @@ pub static DEFAULT_QUERY_IDLE_TIME_MS: once_cell::sync::Lazy = once_cell::sync::Lazy::new(|| NonZeroU64::new(30_000).unwrap()); /// Configuration for `QueryService`. -#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Copy, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "LIVE_QUERY_STORE_")] pub struct Configuration { diff --git a/config/src/logger.rs b/config/src/logger.rs index d3c8e79472a..6d5e4e9d5e6 100644 --- a/config/src/logger.rs +++ b/config/src/logger.rs @@ -2,7 +2,7 @@ //! configuration, as well as run-time reloading of the log-level. use core::fmt::Debug; -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; pub use iroha_data_model::Level; #[cfg(feature = "tokio-console")] use iroha_primitives::addr::{socket_addr, SocketAddr}; @@ -23,7 +23,7 @@ pub fn into_tracing_level(level: Level) -> tracing::Level { } /// 'Logger' configuration. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy, Documented)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "LOG_")] // `tokio_console_addr` is not `Copy`, but warning appears without `tokio-console` feature diff --git a/config/src/network.rs b/config/src/network.rs index e5c5ec48e41..845743fac42 100644 --- a/config/src/network.rs +++ b/config/src/network.rs @@ -1,11 +1,11 @@ //! 
Module for network-related configuration and structs -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; const DEFAULT_ACTOR_CHANNEL_CAPACITY: u32 = 100; /// Network Configuration parameters -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "IROHA_NETWORK_")] pub struct Configuration { diff --git a/config/src/queue.rs b/config/src/queue.rs index 3dde85d60d1..5803e90ed7c 100644 --- a/config/src/queue.rs +++ b/config/src/queue.rs @@ -1,5 +1,5 @@ //! Module for `Queue`-related configuration and structs. -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; const DEFAULT_MAX_TRANSACTIONS_IN_QUEUE: u32 = 2_u32.pow(16); @@ -8,7 +8,7 @@ const DEFAULT_TRANSACTION_TIME_TO_LIVE_MS: u64 = 24 * 60 * 60 * 1000; // 24 hour const DEFAULT_FUTURE_THRESHOLD_MS: u64 = 1000; /// `Queue` configuration. -#[derive(Copy, Clone, Deserialize, Serialize, Debug, Documented, Proxy, PartialEq, Eq)] +#[derive(Copy, Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "QUEUE_")] pub struct Configuration { diff --git a/config/src/snapshot.rs b/config/src/snapshot.rs index 484dfb7de3e..ea949340767 100644 --- a/config/src/snapshot.rs +++ b/config/src/snapshot.rs @@ -1,6 +1,6 @@ //! Module for `SnapshotMaker`-related configuration and structs. -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; const DEFAULT_SNAPSHOT_PATH: &str = "./storage"; @@ -9,7 +9,7 @@ const DEFAULT_SNAPSHOT_CREATE_EVERY_MS: u64 = 1000 * 60; const DEFAULT_ENABLED: bool = true; /// Configuration for `SnapshotMaker`. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "SNAPSHOT_")] pub struct Configuration { diff --git a/config/src/sumeragi.rs b/config/src/sumeragi.rs index c6929d441d6..18698d634c6 100644 --- a/config/src/sumeragi.rs +++ b/config/src/sumeragi.rs @@ -2,7 +2,7 @@ use std::{fmt::Debug, fs::File, io::BufReader, path::Path}; use eyre::{Result, WrapErr}; -use iroha_config_base::derive::{view, Documented, Proxy}; +use iroha_config_base::derive::{view, Proxy}; use iroha_crypto::prelude::*; use iroha_data_model::prelude::*; use iroha_primitives::{unique_vec, unique_vec::UniqueVec}; @@ -36,7 +36,7 @@ view! { /// `Sumeragi` configuration. /// [`struct@Configuration`] provides an ability to define parameters such as `BLOCK_TIME_MS` /// and a list of `TRUSTED_PEERS`. - #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy, Documented)] + #[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "SUMERAGI_")] pub struct Configuration { diff --git a/config/src/telemetry.rs b/config/src/telemetry.rs index 3b04f79c483..b7ce10f9ee4 100644 --- a/config/src/telemetry.rs +++ b/config/src/telemetry.rs @@ -1,12 +1,12 @@ //! Module for telemetry-related configuration and structs. 
use std::path::PathBuf; -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; use url::Url; /// Configuration parameters container -#[derive(Clone, Deserialize, Serialize, Debug, Proxy, Documented, PartialEq, Eq)] +#[derive(Clone, Deserialize, Serialize, Debug, Proxy, PartialEq, Eq)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "TELEMETRY_")] pub struct Configuration { diff --git a/config/src/torii.rs b/config/src/torii.rs index 1c2b801e981..7dea529aa54 100644 --- a/config/src/torii.rs +++ b/config/src/torii.rs @@ -1,6 +1,6 @@ //! `Torii` configuration as well as the default values for the URLs used for the main endpoints: `p2p`, `telemetry`, but not `api`. -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use iroha_primitives::addr::{socket_addr, SocketAddr}; use serde::{Deserialize, Serialize}; @@ -14,7 +14,7 @@ pub const DEFAULT_TORII_MAX_CONTENT_LENGTH: u32 = 2_u32.pow(12) * 4000; /// Structure that defines the configuration parameters of `Torii` which is the routing module. /// For example the `p2p_addr`, which is used for consensus and block-synchronisation purposes, /// as well as `max_transaction_size`. -#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[serde(rename_all = "UPPERCASE")] #[config(env_prefix = "TORII_")] pub struct Configuration { diff --git a/config/src/wasm.rs b/config/src/wasm.rs index 0528da996ed..9e49f8d9391 100644 --- a/config/src/wasm.rs +++ b/config/src/wasm.rs @@ -1,5 +1,5 @@ //! Module for wasm-related configuration and structs. -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use serde::{Deserialize, Serialize}; use self::default::*; @@ -13,7 +13,7 @@ pub mod default { } /// `WebAssembly Runtime` configuration. -#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Documented, Proxy)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[config(env_prefix = "WASM_")] #[serde(rename_all = "UPPERCASE")] pub struct Configuration { diff --git a/config/src/wsv.rs b/config/src/wsv.rs index aacc58734be..dcb23b23d85 100644 --- a/config/src/wsv.rs +++ b/config/src/wsv.rs @@ -1,6 +1,6 @@ //! Module for `WorldStateView`-related configuration and structs. use default::*; -use iroha_config_base::derive::{Documented, Proxy}; +use iroha_config_base::derive::Proxy; use iroha_data_model::{prelude::*, transaction::TransactionLimits}; use serde::{Deserialize, Serialize}; @@ -26,7 +26,7 @@ pub mod default { } /// `WorldStateView` configuration. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy, Documented)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Deserialize, Serialize, Proxy)] #[config(env_prefix = "WSV_")] #[serde(rename_all = "UPPERCASE")] pub struct Configuration { diff --git a/docs/README.md b/docs/README.md index 11e941c5e8f..b8aa69d85a4 100644 --- a/docs/README.md +++ b/docs/README.md @@ -4,7 +4,7 @@ This is the main Iroha 2 documentation that you will find useful: - [Tutorial](https://hyperledger.github.io/iroha-2-docs/) - [API Reference](https://hyperledger.github.io/iroha-2-docs/api/torii-endpoints) -- [Configuration Reference](./source/references/config.md) + - [Iroha 2 Whitepaper](./source/iroha_2_whitepaper.md) ## Tools @@ -18,5 +18,6 @@ Documentation for Iroha 2 tools: ## Development The following is useful for development: + - [Hot reload Iroha in a Docker container](./source/guides/hot-reload.md) - [Benchmark your code](../client/benches/tps/README.md) diff --git a/docs/source/references/config.md b/docs/source/references/config.md deleted file mode 100644 index 82b00f0bc54..00000000000 --- a/docs/source/references/config.md +++ /dev/null @@ -1,835 +0,0 @@ -# Iroha Configuration reference - -In this document we provide a reference and detailed descriptions of Iroha's configuration options. The options have different underlying types and default values, which are denoted in code as types wrapped in a single `Option<..>` or in a double `Option>`. For the detailed explanation, please refer to this [section](#configuration-types). - -## Configuration types - -### `Option<..>` - -A type wrapped in a single `Option<..>` signifies that in the corresponding `json` block there is a fallback value for this type, and that it only serves as a reference. If a default for such a type has a `null` value, it means that there is no meaningful fallback available for this particular value. - -All the default values can be freely obtained from a provided [sample configuration file](../../../configs/peer/config.json), but it should only serve as a starting point. If left unchanged, the sample configuration file would still fail to build due to it having `null` in place of [public](#public_key) and [private](#private_key) keys as well as [API endpoint URL](#torii.api_url). These should be provided either by modifying the sample config file or as environment variables. No other overloading of configuration values happens besides reading them from a file and capturing the environment variables. - -For both types of configuration options wrapped in a single `Option<..>` (i.e. both those that have meaningful defaults and those that have `null`), failure to provide them in any of the above two ways results in an error. - -### `Option>` - -`Option>` types should be distinguished from types wrapped in a single `Option<..>`. Only the double option ones are allowed to stay `null`, meaning that **not** providing them in an environment variable or a file will **not** result in an error. - -Thus, only these types are truly optional in the mundane sense of the word. An example of this distinction is genesis [public](#genesis.account_public_key) and [private](#genesis.account_private_key) key. While the first one is a single `Option<..>` wrapped type, the latter is wrapped in `Option>`. 
This means that the genesis *public* key should always be provided by the user, be it via a file config or an environment variable, whereas the *private* key is only needed for the peer that submits the genesis block, and can be omitted for all others. The same logic goes for other double option fields such as logger file path. - -### Sumeragi: default `null` values - -A special note about sumeragi fields with `null` as default: only the [`trusted_peers`](#sumeragi.trusted_peers) field out of the three can be initialized via a provided file or an environment variable. - -The other two fields, namely [`key_pair`](#sumeragi.key_pair) and [`peer_id`](#sumeragi.peer_id), go through a process of finalization where their values are derived from the corresponding ones in the uppermost Iroha config (using its [`public_key`](#public_key) and [`private_key`](#private_key) fields) or the Torii config (via its [`p2p_addr`](#torii.p2p_addr)). This ensures that these linked fields stay in sync, and prevents the programmer error when different values are provided to these field pairs. Providing either `sumeragi.key_pair` or `sumeragi.peer_id` by hand will result in an error, as it should never be done directly. - -## Default configuration - -The following is the default configuration used by Iroha. - -```json -{ - "PUBLIC_KEY": null, - "PRIVATE_KEY": null, - "DISABLE_PANIC_TERMINAL_COLORS": false, - "KURA": { - "INIT_MODE": "strict", - "BLOCK_STORE_PATH": "./storage", - "DEBUG_OUTPUT_NEW_BLOCKS": false - }, - "SUMERAGI": { - "KEY_PAIR": null, - "PEER_ID": null, - "BLOCK_TIME_MS": 2000, - "TRUSTED_PEERS": null, - "COMMIT_TIME_LIMIT_MS": 4000, - "MAX_TRANSACTIONS_IN_BLOCK": 512, - "ACTOR_CHANNEL_CAPACITY": 100, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000 - }, - "TORII": { - "P2P_ADDR": null, - "API_URL": null, - "MAX_TRANSACTION_SIZE": 32768, - "MAX_CONTENT_LEN": 16384000 - }, - "BLOCK_SYNC": { - "GOSSIP_PERIOD_MS": 10000, - "BLOCK_BATCH_SIZE": 4, - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "QUEUE": { - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000, - "FUTURE_THRESHOLD_MS": 1000 - }, - "LOGGER": { - "LEVEL": "INFO", - "FORMAT": "full" - }, - "GENESIS": { - "ACCOUNT_PUBLIC_KEY": null, - "ACCOUNT_PRIVATE_KEY": null - }, - "WSV": { - "ASSET_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "ACCOUNT_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "DOMAIN_METADATA_LIMITS": { - "max_len": 1048576, - "max_entry_byte_size": 4096 - }, - "IDENT_LENGTH_LIMITS": { - "min": 1, - "max": 128 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 23000000, - "MAX_MEMORY": 524288000 - } - }, - "NETWORK": { - "ACTOR_CHANNEL_CAPACITY": 100 - }, - "TELEMETRY": { - "NAME": null, - "URL": null, - "MIN_RETRY_PERIOD": 1, - "MAX_RETRY_DELAY_EXPONENT": 4, - "FILE": null - }, - "SNAPSHOT": { - "CREATE_EVERY_MS": 60000, - "DIR_PATH": "./storage", - "CREATION_ENABLED": true - }, - "LIVE_QUERY_STORE": { - "QUERY_IDLE_TIME_MS": 30000 - } -} -``` - -## `block_sync` - -`BlockSynchronizer` configuration - -Has type `Option`[^1]. 
Can be configured via environment variable `IROHA_BLOCK_SYNC` - -```json -{ - "ACTOR_CHANNEL_CAPACITY": 100, - "BLOCK_BATCH_SIZE": 4, - "GOSSIP_PERIOD_MS": 10000 -} -``` - -### `block_sync.actor_channel_capacity` - -Buffer capacity of actor's MPSC channel - -Has type `Option`[^1]. Can be configured via environment variable `BLOCK_SYNC_ACTOR_CHANNEL_CAPACITY` - -```json -100 -``` - -### `block_sync.block_batch_size` - -The number of blocks that can be sent in one message. - -Has type `Option`[^1]. Can be configured via environment variable `BLOCK_SYNC_BLOCK_BATCH_SIZE` - -```json -4 -``` - -### `block_sync.gossip_period_ms` - -The period of time to wait between sending requests for the latest block. - -Has type `Option`[^1]. Can be configured via environment variable `BLOCK_SYNC_GOSSIP_PERIOD_MS` - -```json -10000 -``` - -## `disable_panic_terminal_colors` - -Disable coloring of the backtrace and error report on panic - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_DISABLE_PANIC_TERMINAL_COLORS` - -```json -false -``` - -## `genesis` - -`GenesisBlock` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_GENESIS` - -```json -{ - "ACCOUNT_PRIVATE_KEY": null, - "ACCOUNT_PUBLIC_KEY": null -} -``` - -### `genesis.account_private_key` - -The private key of the genesis account, only needed for the peer that submits the genesis block. - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_GENESIS_ACCOUNT_PRIVATE_KEY` - -```json -null -``` - -### `genesis.account_public_key` - -The public key of the genesis account, should be supplied to all peers. - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_GENESIS_ACCOUNT_PUBLIC_KEY` - -```json -null -``` - -## `kura` - -`Kura` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_KURA` - -```json -{ - "BLOCK_STORE_PATH": "./storage", - "DEBUG_OUTPUT_NEW_BLOCKS": false, - "INIT_MODE": "strict" -} -``` - -### `kura.block_store_path` - -Path to the existing block store folder or path to create new folder. - -Has type `Option`[^1]. Can be configured via environment variable `KURA_BLOCK_STORE_PATH` - -```json -"./storage" -``` - -### `kura.debug_output_new_blocks` - -Whether or not new blocks be outputted to a file called blocks.json. - -Has type `Option`[^1]. Can be configured via environment variable `KURA_DEBUG_OUTPUT_NEW_BLOCKS` - -```json -false -``` - -### `kura.init_mode` - -Initialization mode: `strict` or `fast`. - -Has type `Option`[^1]. Can be configured via environment variable `KURA_INIT_MODE` - -```json -"strict" -``` - -## `live_query_store` - -LiveQueryStore configuration - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_LIVE_QUERY_STORE` - -```json -{ - "QUERY_IDLE_TIME_MS": 30000 -} -``` - -### `live_query_store.query_idle_time_ms` - -Time query can remain in the store if unaccessed - -Has type `Option`[^1]. Can be configured via environment variable `LIVE_QUERY_STORE_QUERY_IDLE_TIME_MS` - -```json -30000 -``` - -## `logger` - -`Logger` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_LOGGER` - -```json -{ - "FORMAT": "full", - "LEVEL": "INFO" -} -``` - -### `logger.format` - -Output format - -Has type `Option`[^1]. Can be configured via environment variable `LOG_FORMAT` - -```json -"full" -``` - -### `logger.level` - -Level of logging verbosity - -Has type `Option`[^1]. 
Can be configured via environment variable `LOG_LEVEL` - -```json -"INFO" -``` - -## `network` - -Network configuration - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_NETWORK` - -```json -{ - "ACTOR_CHANNEL_CAPACITY": 100 -} -``` - -### `network.actor_channel_capacity` - -Buffer capacity of actor's MPSC channel - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_NETWORK_ACTOR_CHANNEL_CAPACITY` - -```json -100 -``` - -## `private_key` - -Private key of this peer - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_PRIVATE_KEY` - -```json -null -``` - -## `public_key` - -Public key of this peer - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_PUBLIC_KEY` - -```json -null -``` - -## `queue` - -`Queue` configuration - -Has type `Option`[^1]. Can be configured via environment variable `IROHA_QUEUE` - -```json -{ - "FUTURE_THRESHOLD_MS": 1000, - "MAX_TRANSACTIONS_IN_QUEUE": 65536, - "MAX_TRANSACTIONS_IN_QUEUE_PER_USER": 65536, - "TRANSACTION_TIME_TO_LIVE_MS": 86400000 -} -``` - -### `queue.future_threshold_ms` - -The threshold to determine if a transaction has been tampered to have a future timestamp. - -Has type `Option`[^1]. Can be configured via environment variable `QUEUE_FUTURE_THRESHOLD_MS` - -```json -1000 -``` - -### `queue.max_transactions_in_queue` - -The upper limit of the number of transactions waiting in the queue. - -Has type `Option`[^1]. Can be configured via environment variable `QUEUE_MAX_TRANSACTIONS_IN_QUEUE` - -```json -65536 -``` - -### `queue.max_transactions_in_queue_per_user` - -The upper limit of the number of transactions waiting in the queue for single user. - -Has type `Option`[^1]. Can be configured via environment variable `QUEUE_MAX_TRANSACTIONS_IN_QUEUE_PER_USER` - -```json -65536 -``` - -### `queue.transaction_time_to_live_ms` - -The transaction will be dropped after this time if it is still in the queue. - -Has type `Option`[^1]. Can be configured via environment variable `QUEUE_TRANSACTION_TIME_TO_LIVE_MS` - -```json -86400000 -``` - -## `snapshot` - -SnapshotMaker configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_SNAPSHOT` - -```json -{ - "CREATE_EVERY_MS": 60000, - "CREATION_ENABLED": true, - "DIR_PATH": "./storage" -} -``` - -### `snapshot.create_every_ms` - -The period of time to wait between attempts to create new snapshot. - -Has type `Option`[^1]. Can be configured via environment variable `SNAPSHOT_CREATE_EVERY_MS` - -```json -60000 -``` - -### `snapshot.creation_enabled` - -Flag to enable or disable snapshot creation - -Has type `Option`[^1]. Can be configured via environment variable `SNAPSHOT_CREATION_ENABLED` - -```json -true -``` - -### `snapshot.dir_path` - -Path to the directory where snapshots should be stored - -Has type `Option`[^1]. Can be configured via environment variable `SNAPSHOT_DIR_PATH` - -```json -"./storage" -``` - -## `sumeragi` - -`Sumeragi` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_SUMERAGI` - -```json -{ - "ACTOR_CHANNEL_CAPACITY": 100, - "BLOCK_TIME_MS": 2000, - "COMMIT_TIME_LIMIT_MS": 4000, - "GOSSIP_BATCH_SIZE": 500, - "GOSSIP_PERIOD_MS": 1000, - "KEY_PAIR": null, - "MAX_TRANSACTIONS_IN_BLOCK": 512, - "PEER_ID": null, - "TRUSTED_PEERS": null -} -``` - -### `sumeragi.actor_channel_capacity` - -Buffer capacity of actor's MPSC channel - -Has type `Option`[^1]. 
Can be configured via environment variable `SUMERAGI_ACTOR_CHANNEL_CAPACITY` - -```json -100 -``` - -### `sumeragi.block_time_ms` - -The period of time a peer waits for the `CreatedBlock` message after getting a `TransactionReceipt` - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_BLOCK_TIME_MS` - -```json -2000 -``` - -### `sumeragi.commit_time_limit_ms` - -The period of time a peer waits for `CommitMessage` from the proxy tail. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_COMMIT_TIME_LIMIT_MS` - -```json -4000 -``` - -### `sumeragi.gossip_batch_size` - -max number of transactions in tx gossip batch message. While configuring this, pay attention to `p2p` max message size. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_GOSSIP_BATCH_SIZE` - -```json -500 -``` - -### `sumeragi.gossip_period_ms` - -Period in milliseconds for pending transaction gossiping between peers. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_GOSSIP_PERIOD_MS` - -```json -1000 -``` - -### `sumeragi.key_pair` - -The key pair consisting of a private and a public key. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_KEY_PAIR` - -```json -null -``` - -### `sumeragi.max_transactions_in_block` - -The upper limit of the number of transactions per block. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_MAX_TRANSACTIONS_IN_BLOCK` - -```json -512 -``` - -### `sumeragi.peer_id` - -Current Peer Identification. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_PEER_ID` - -```json -null -``` - -### `sumeragi.trusted_peers` - -Optional list of predefined trusted peers. - -Has type `Option`[^1]. Can be configured via environment variable `SUMERAGI_TRUSTED_PEERS` - -```json -null -``` - -## `telemetry` - -Telemetry configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_TELEMETRY` - -```json -{ - "FILE": null, - "MAX_RETRY_DELAY_EXPONENT": 4, - "MIN_RETRY_PERIOD": 1, - "NAME": null, - "URL": null -} -``` - -### `telemetry.file` - -The filepath that to write dev-telemetry to - -Has type `Option>`[^1]. Can be configured via environment variable `TELEMETRY_FILE` - -```json -null -``` - -### `telemetry.max_retry_delay_exponent` - -The maximum exponent of 2 that is used for increasing delay between reconnections - -Has type `Option`[^1]. Can be configured via environment variable `TELEMETRY_MAX_RETRY_DELAY_EXPONENT` - -```json -4 -``` - -### `telemetry.min_retry_period` - -The minimum period of time in seconds to wait before reconnecting - -Has type `Option`[^1]. Can be configured via environment variable `TELEMETRY_MIN_RETRY_PERIOD` - -```json -1 -``` - -### `telemetry.name` - -The node's name to be seen on the telemetry - -Has type `Option>`[^1]. Can be configured via environment variable `TELEMETRY_NAME` - -```json -null -``` - -### `telemetry.url` - -The url of the telemetry, e.g., ws://127.0.0.1:8001/submit - -Has type `Option>`[^1]. Can be configured via environment variable `TELEMETRY_URL` - -```json -null -``` - -## `torii` - -`Torii` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_TORII` - -```json -{ - "API_URL": null, - "MAX_CONTENT_LEN": 16384000, - "MAX_TRANSACTION_SIZE": 32768, - "P2P_ADDR": null -} -``` - -### `torii.api_url` - -Torii address for client API. - -Has type `Option`[^1]. 
Can be configured via environment variable `TORII_API_URL` - -```json -null -``` - -### `torii.max_content_len` - -Maximum number of bytes in raw message. Used to prevent from DOS attacks. - -Has type `Option`[^1]. Can be configured via environment variable `TORII_MAX_CONTENT_LEN` - -```json -16384000 -``` - -### `torii.max_transaction_size` - -Maximum number of bytes in raw transaction. Used to prevent from DOS attacks. - -Has type `Option`[^1]. Can be configured via environment variable `TORII_MAX_TRANSACTION_SIZE` - -```json -32768 -``` - -### `torii.p2p_addr` - -Torii address for p2p communication for consensus and block synchronization purposes. - -Has type `Option`[^1]. Can be configured via environment variable `TORII_P2P_ADDR` - -```json -null -``` - -## `wsv` - -`WorldStateView` configuration - -Has type `Option>`[^1]. Can be configured via environment variable `IROHA_WSV` - -```json -{ - "ACCOUNT_METADATA_LIMITS": { - "max_entry_byte_size": 4096, - "max_len": 1048576 - }, - "ASSET_DEFINITION_METADATA_LIMITS": { - "max_entry_byte_size": 4096, - "max_len": 1048576 - }, - "ASSET_METADATA_LIMITS": { - "max_entry_byte_size": 4096, - "max_len": 1048576 - }, - "DOMAIN_METADATA_LIMITS": { - "max_entry_byte_size": 4096, - "max_len": 1048576 - }, - "IDENT_LENGTH_LIMITS": { - "max": 128, - "min": 1 - }, - "TRANSACTION_LIMITS": { - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 - }, - "WASM_RUNTIME_CONFIG": { - "FUEL_LIMIT": 23000000, - "MAX_MEMORY": 524288000 - } -} -``` - -### `wsv.account_metadata_limits` - -[`MetadataLimits`] of any account metadata. - -Has type `Option`[^1]. Can be configured via environment variable `WSV_ACCOUNT_METADATA_LIMITS` - -```json -{ - "max_entry_byte_size": 4096, - "max_len": 1048576 -} -``` - -### `wsv.asset_definition_metadata_limits` - -[`MetadataLimits`] of any asset definition metadata. - -Has type `Option`[^1]. Can be configured via environment variable `WSV_ASSET_DEFINITION_METADATA_LIMITS` - -```json -{ - "max_entry_byte_size": 4096, - "max_len": 1048576 -} -``` - -### `wsv.asset_metadata_limits` - -[`MetadataLimits`] for every asset with store. - -Has type `Option`[^1]. Can be configured via environment variable `WSV_ASSET_METADATA_LIMITS` - -```json -{ - "max_entry_byte_size": 4096, - "max_len": 1048576 -} -``` - -### `wsv.domain_metadata_limits` - -[`MetadataLimits`] of any domain metadata. - -Has type `Option`[^1]. Can be configured via environment variable `WSV_DOMAIN_METADATA_LIMITS` - -```json -{ - "max_entry_byte_size": 4096, - "max_len": 1048576 -} -``` - -### `wsv.ident_length_limits` - -[`LengthLimits`] for the number of chars in identifiers that can be stored in the WSV. - -Has type `Option`[^1]. Can be configured via environment variable `WSV_IDENT_LENGTH_LIMITS` - -```json -{ - "max": 128, - "min": 1 -} -``` - -### `wsv.transaction_limits` - -Limits that all transactions need to obey, in terms of size - -Has type `Option`[^1]. Can be configured via environment variable `WSV_TRANSACTION_LIMITS` - -```json -{ - "max_instruction_number": 4096, - "max_wasm_size_bytes": 4194304 -} -``` - -### `wsv.wasm_runtime_config` - -WASM runtime configuration - -Has type `Option`[^1]. Can be configured via environment variable `WSV_WASM_RUNTIME_CONFIG` - -```json -{ - "FUEL_LIMIT": 23000000, - "MAX_MEMORY": 524288000 -} -``` - -#### `wsv.wasm_runtime_config.fuel_limit` - -The fuel limit determines the maximum number of instructions that can be executed within a smart contract. - -Has type `Option`[^1]. 
Can be configured via environment variable `WASM_FUEL_LIMIT` - -```json -23000000 -``` - -#### `wsv.wasm_runtime_config.max_memory` - -Maximum amount of linear memory a given smart contract can allocate. - -Has type `Option`[^1]. Can be configured via environment variable `WASM_MAX_MEMORY` - -```json -524288000 -``` - diff --git a/hooks/pre-commit.sample b/hooks/pre-commit.sample index fdf09848d91..72addb9c6b5 100755 --- a/hooks/pre-commit.sample +++ b/hooks/pre-commit.sample @@ -2,7 +2,6 @@ set -e cargo +nightly fmt --all -- --check cargo +nightly lints clippy --workspace --benches --tests --examples --all-features -cargo run --bin kagami -- docs >docs/source/references/config.md cargo run --bin kagami -- genesis >configs/peer/genesis.json cargo run --bin kagami -- schema >docs/source/references/schema.json -git add docs/source/references/config.md configs/peer/genesis.json docs/source/references/schema.json +git add configs/peer/genesis.json docs/source/references/schema.json diff --git a/scripts/tests/consistency.sh b/scripts/tests/consistency.sh index dd5a5291a5c..bf5873f2f81 100755 --- a/scripts/tests/consistency.sh +++ b/scripts/tests/consistency.sh @@ -2,11 +2,6 @@ set -e case $1 in - "docs") - cargo run --release --bin kagami -- docs | diff - docs/source/references/config.md || { - echo 'Please re-generate docs using `cargo run --release --bin kagami -- docs > docs/source/references/config.md`' - exit 1 - };; "genesis") cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm | diff - configs/peer/genesis.json || { echo 'Please re-generate the genesis with `cargo run --release --bin kagami -- genesis --executor-path-in-genesis ./executor.wasm > configs/peer/genesis.json`' diff --git a/tools/kagami/src/docs.rs b/tools/kagami/src/docs.rs deleted file mode 100644 index 737959c5aef..00000000000 --- a/tools/kagami/src/docs.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::{fmt::Debug, io::Write}; - -use color_eyre::eyre::WrapErr as _; -use iroha_config::{base::proxy::Documented, iroha::ConfigurationProxy}; -use serde_json::Value; - -use super::*; - -impl + Send + Sync + Default> PrintDocs for C {} - -#[derive(ClapArgs, Debug, Clone, Copy)] -pub struct Args; - -impl RunArgs for Args { - fn run(self, writer: &mut BufWriter) -> crate::Outcome { - ConfigurationProxy::get_markdown(writer).wrap_err("Failed to generate documentation") - } -} - -pub trait PrintDocs: Documented + Send + Sync + Default -where - Self::Error: Debug, -{ - fn get_markdown(writer: &mut W) -> color_eyre::Result<()> { - let Value::Object(docs) = Self::get_docs() else { - unreachable!("Top level structure is always an object") - }; - let mut vec = Vec::new(); - let defaults = serde_json::to_string_pretty(&Self::default())?; - - writeln!(writer, "# Iroha Configuration reference\n")?; - writeln!(writer, "In this document we provide a reference and detailed descriptions of Iroha's configuration options. \ - The options have different underlying types and default values, which are denoted in code as types wrapped in a single \ - `Option<..>` or in a double `Option>`. For the detailed explanation, please refer to \ - this [section](#configuration-types).\n")?; - writeln!( - writer, - "## Configuration types\n\n\ - ### `Option<..>`\n\n\ - A type wrapped in a single `Option<..>` signifies that in the corresponding `json` block there is a fallback value for this type, \ - and that it only serves as a reference. 
If a default for such a type has a `null` value, it means that there is no meaningful fallback \ - available for this particular value.\n\nAll the default values can be freely obtained from a provided [sample configuration file](../../../configs/peer/config.json), \ - but it should only serve as a starting point. If left unchanged, the sample configuration file would still fail to build due to it having `null` in place of \ - [public](#public_key) and [private](#private_key) keys as well as [API endpoint URL](#torii.api_url). \ - These should be provided either by modifying the sample config file or as environment variables. \ - No other overloading of configuration values happens besides reading them from a file and capturing the environment variables.\n\n\ - For both types of configuration options wrapped in a single `Option<..>` (i.e. both those that have meaningful defaults and those that have `null`), \ - failure to provide them in any of the above two ways results in an error.\n\n\ - ### `Option<Option<..>>`\n\n\ - `Option<Option<..>>` types should be distinguished from types wrapped in a single `Option<..>`. Only the double option ones are allowed to stay `null`, \ - meaning that **not** providing them in an environment variable or a file will **not** result in an error.\n\n\ - Thus, only these types are truly optional in the mundane sense of the word. \ - An example of this distinction is genesis [public](#genesis.account_public_key) and [private](#genesis.account_private_key) key. \ - While the first one is a single `Option<..>` wrapped type, the latter is wrapped in `Option<Option<..>>`. This means that the genesis *public* key should always be \ - provided by the user, be it via a file config or an environment variable, whereas the *private* key is only needed for the peer that submits the genesis block, \ - and can be omitted for all others. The same logic goes for other double option fields such as logger file path.\n\n\ - ### Sumeragi: default `null` values\n\n\ - A special note about sumeragi fields with `null` as default: only the [`trusted_peers`](#sumeragi.trusted_peers) field out of the three can be initialized via a \ - provided file or an environment variable.\n\n\ - The other two fields, namely [`key_pair`](#sumeragi.key_pair) and [`peer_id`](#sumeragi.peer_id), go through a process of finalization where their values \ - are derived from the corresponding ones in the uppermost Iroha config (using its [`public_key`](#public_key) and [`private_key`](#private_key) fields) \ - or the Torii config (via its [`p2p_addr`](#torii.p2p_addr)). \ - This ensures that these linked fields stay in sync, and prevents the programmer error when different values are provided to these field pairs. 
\ - Providing either `sumeragi.key_pair` or `sumeragi.peer_id` by hand will result in an error, as it should never be done directly.\n" - )?; - writeln!(writer, "## Default configuration\n")?; - writeln!( - writer, - "The following is the default configuration used by Iroha.\n" - )?; - writeln!(writer, "```json\n{defaults}\n```\n")?; - Self::get_markdown_with_depth(writer, &docs, &mut vec, 2)?; - Ok(()) - } - - fn get_markdown_with_depth( - writer: &mut W, - docs: &serde_json::Map, - field: &mut Vec, - depth: usize, - ) -> color_eyre::Result<()> { - let current_field = { - let mut docs = docs; - for f in &*field { - docs = match &docs[f] { - Value::Object(obj) => obj, - _ => unreachable!(), - }; - } - docs - }; - - for (f, value) in current_field { - field.push(f.clone()); - let get_field = field.iter().map(AsRef::as_ref).collect::>(); - let (doc, inner) = match value { - Value::Object(_) => { - let doc = Self::get_doc_recursive(&get_field) - .expect("Should be there, as already in docs"); - (doc.unwrap_or_default(), true) - } - Value::String(s) => (s.clone(), false), - _ => unreachable!("Only strings and objects in docs"), - }; - // Hacky workaround to avoid duplicating inner fields docs in the reference - let doc = doc.lines().take(3).collect::>().join("\n"); - let doc = doc.strip_prefix(' ').unwrap_or(&doc); - let defaults = Self::default() - .get_recursive(get_field) - .expect("Failed to get defaults."); - let defaults = serde_json::to_string_pretty(&defaults)?; - let field_str = field - .join(".") - .chars() - .filter(|&chr| chr != ' ') - .collect::(); - - write!(writer, "{} `{}`\n\n", "#".repeat(depth), field_str)?; - write!(writer, "{doc}\n\n")?; - write!(writer, "```json\n{defaults}\n```\n\n")?; - - if inner { - Self::get_markdown_with_depth(writer, docs, field, depth + 1)?; - } - - field.pop(); - } - Ok(()) - } -} diff --git a/tools/kagami/src/main.rs b/tools/kagami/src/main.rs index f4ed9d40733..87ef8774f12 100644 --- a/tools/kagami/src/main.rs +++ b/tools/kagami/src/main.rs @@ -12,7 +12,6 @@ use iroha_data_model::prelude::*; mod config; mod crypto; -mod docs; mod genesis; mod schema; @@ -54,8 +53,6 @@ enum Args { Genesis(genesis::Args), /// Generate the default client/peer configuration Config(config::Args), - /// Generate a Markdown reference of configuration parameters - Docs(Box), } impl RunArgs for Args { @@ -67,7 +64,6 @@ impl RunArgs for Args { Schema(args) => args.run(writer), Genesis(args) => args.run(writer), Config(args) => args.run(writer), - Docs(args) => args.run(writer), } } }
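The configuration-types explanation in the removed reference (single `Option<..>` versus double `Option<Option<..>>` fields) is easier to follow with code. Below is a minimal, hypothetical sketch in plain standard-library Rust; the struct and field names are illustrative and are not the real `iroha_config` proxy API. It shows the behaviour the deleted text describes: a field wrapped in a single `Option` must be resolved from a file or an environment variable before the configuration can be built, while a doubly wrapped field may legitimately stay unset.

```rust
#[derive(Debug)]
struct ConfigurationProxy {
    /// Single `Option`: must be filled from a config file or an environment variable.
    public_key: Option<String>,
    /// Double `Option`: truly optional, e.g. only the genesis-submitting peer needs it.
    genesis_private_key: Option<Option<String>>,
}

#[derive(Debug)]
struct Configuration {
    public_key: String,
    genesis_private_key: Option<String>,
}

impl ConfigurationProxy {
    fn build(self) -> Result<Configuration, String> {
        Ok(Configuration {
            // A missing single-`Option` field is a build error.
            public_key: self
                .public_key
                .ok_or_else(|| "`public_key` is not set".to_string())?,
            // A missing double-`Option` field simply stays `None`.
            genesis_private_key: self.genesis_private_key.flatten(),
        })
    }
}

fn main() {
    // Builds fine even though the optional field was never provided.
    let ok = ConfigurationProxy {
        public_key: Some("<multihash public key>".to_string()),
        genesis_private_key: None,
    };
    println!("{:?}", ok.build());

    // Fails, because the required field is missing.
    let err = ConfigurationProxy {
        public_key: None,
        genesis_private_key: Some(Some("<private key>".to_string())),
    };
    println!("{:?}", err.build());
}
```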
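For readers wondering what the removed `kagami docs` generator produced, here is a simplified, hypothetical sketch of the recursive walk performed by the deleted `get_markdown_with_depth` (assuming the `serde_json` crate is available, as it is in this workspace): nested objects in the docs tree become progressively deeper headings named by the dot-joined field path, and leaf values become fenced JSON blocks. The real implementation additionally pulled per-field doc strings and defaults, as visible in the removed code above.

```rust
use serde_json::{json, Value};

/// Renders a docs tree as markdown: headings grow one level deeper with nesting,
/// and the heading text is the dot-joined path of field names.
fn walk(value: &Value, path: &mut Vec<String>, depth: usize, out: &mut String) {
    // Code fence built at runtime so the backticks don't interfere with this document.
    let fence = "`".repeat(3);
    if let Value::Object(map) = value {
        for (key, inner) in map {
            path.push(key.clone());
            out.push_str(&format!("{} `{}`\n\n", "#".repeat(depth), path.join(".")));
            match inner {
                // Nested object: recurse one heading level deeper.
                Value::Object(_) => walk(inner, path, depth + 1, out),
                // Leaf: print its (default) value as a fenced JSON block.
                leaf => out.push_str(&format!("{fence}json\n{leaf}\n{fence}\n\n")),
            }
            path.pop();
        }
    }
}

fn main() {
    let docs = json!({
        "logger": { "format": "full", "level": "INFO" },
        "torii": { "max_content_len": 16_384_000 }
    });
    let mut out = String::new();
    walk(&docs, &mut Vec::new(), 2, &mut out);
    print!("{out}");
}
```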