diff --git a/.github/workflows/typos.yml b/.github/workflows/typos.yml index 5dfb324ecc..bfe2701855 100644 --- a/.github/workflows/typos.yml +++ b/.github/workflows/typos.yml @@ -20,4 +20,4 @@ jobs: name: Checkout Repository - name: typos-action - uses: crate-ci/typos@v1.28.1 + uses: crate-ci/typos@v1.28.2 diff --git a/Cargo.lock b/Cargo.lock index ea35ecc6d2..a97145b4fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -175,9 +175,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.93" +version = "1.0.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c95c10ba0b00a02636238b814946408b1322d5ac4760326e6fb8ec956d85775" +checksum = "c1fd03a028ef38ba2276dce7e33fcd6369c158a1bca17946c4b1b701891c1ff7" [[package]] name = "arbitrary" @@ -453,7 +453,7 @@ dependencies = [ "rand 0.8.5", "sha2 0.10.8", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", "ureq", ] @@ -499,7 +499,7 @@ dependencies = [ "num-traits", "rusticata-macros", "thiserror 1.0.68", - "time 0.3.36", + "time 0.3.37", ] [[package]] @@ -510,7 +510,7 @@ checksum = "965c2d33e53cb6b267e148a4cb0760bc01f4904c1cd4bb4002a085bb016d1490" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -522,7 +522,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -801,7 +801,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -831,7 +831,7 @@ checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -916,7 +916,7 @@ checksum = "edf3ee19dbc0a46d740f6f0926bde8c50f02bdbc7b536842da28f6ac56513a8b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1041,7 +1041,7 @@ dependencies = [ "regex", "rustc-hash 1.1.0", "shlex", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1251,7 +1251,7 @@ dependencies = [ "rkyv", "tokio", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", ] [[package]] @@ -1267,7 +1267,7 @@ dependencies = [ "rand 0.8.5", "tokio", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", ] [[package]] @@ -1280,7 +1280,7 @@ dependencies = [ "jf-signature", "tokio", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", ] [[package]] @@ -1374,9 +1374,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb3b4b9e5a7c7514dfa52869339ee98b3156b0bfb4e8a77c4ff4babb64b1604f" +checksum = "69371e34337c4c984bbe322360c2547210bf632eb2814bbe78a6e87a2935bd2b" dependencies = [ "clap_builder", "clap_derive", @@ -1384,9 +1384,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.21" +version = "4.5.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b17a95aa67cc7b5ebd32aa5370189aa0d79069ef1c64ce893bd30fb24bff20ec" +checksum = "6e24c1b4099818523236a8ca881d2b45db98dadfb4625cf6608c12069fcbbde1" dependencies = [ "anstream", "anstyle", @@ -1403,7 +1403,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1522,7 +1522,7 @@ dependencies = [ "tonic", "tracing", "tracing-core", - "tracing-subscriber 0.3.18", + "tracing-subscriber 
0.3.19", ] [[package]] @@ -1799,7 +1799,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1823,7 +1823,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1834,7 +1834,7 @@ checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1885,7 +1885,7 @@ checksum = "bc2323e10c92e1cf4d86e11538512e6dc03ceb586842970b6332af3d4046a046" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1942,7 +1942,7 @@ checksum = "30542c1ad912e0e3d22a1935c290e12e8a29d704a420177a31faad4a601a0800" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1963,7 +1963,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1973,7 +1973,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -1986,7 +1986,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version 0.4.1", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2006,7 +2006,7 @@ checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "unicode-xid", ] @@ -2075,7 +2075,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2175,7 +2175,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2444,7 +2444,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -2898,10 +2898,10 @@ dependencies = [ "rand 0.8.5", "serde", "sha2 0.10.8", - "time 0.3.36", + "time 0.3.37", "tokio", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", "url", "utils", "vbs", @@ -2919,7 +2919,7 @@ dependencies = [ "hotshot-types", "serde", "tagged-base64", - "thiserror 2.0.3", + "thiserror 2.0.4", "tide-disco", "toml", "vbs", @@ -2942,8 +2942,8 @@ dependencies = [ "serde", "sha2 0.10.8", "sha3", - "thiserror 2.0.3", - "time 0.3.36", + "thiserror 2.0.4", + "time 0.3.37", "tokio", "url", "vbs", @@ -2972,11 +2972,11 @@ dependencies = [ "serde", "sha2 0.10.8", "surf-disco", - "time 0.3.36", + "time 0.3.37", "tokio", "toml", "tracing", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", "url", ] @@ -3004,7 +3004,7 @@ dependencies = [ "derive_builder", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3085,8 +3085,8 @@ dependencies = [ "sha2 0.10.8", "surf-disco", "tagged-base64", - "thiserror 2.0.3", - "time 0.3.36", + "thiserror 2.0.4", + "time 0.3.37", "tokio", "tracing", "url", @@ -3126,7 +3126,7 @@ dependencies = [ "serde", "sha2 0.10.8", "tagged-base64", - "thiserror 2.0.3", + "thiserror 2.0.4", "tide-disco", "tokio", "tracing", @@ -3177,8 +3177,8 @@ dependencies = [ "serde_json", "sha2 0.10.8", "tagged-base64", - "thiserror 2.0.3", - "time 0.3.36", + "thiserror 2.0.4", + "time 0.3.37", "tokio", "toml", "tracing", @@ -3576,7 +3576,7 @@ checksum = 
"1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -3666,18 +3666,18 @@ dependencies = [ [[package]] name = "impl-codec" -version = "0.7.0" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67aa010c1e3da95bf151bd8b4c059b2ed7e75387cdb969b4f8f2723a43f9941" +checksum = "ba6a270039626615617f3f36d15fc827041df3b78c439da2cadfa47455a77f2f" dependencies = [ "parity-scale-codec", ] [[package]] name = "impl-serde" -version = "0.5.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a143eada6a1ec4aefa5049037a26a6d597bfd64f8c026d07b77133e02b7dd0b" +checksum = "ebc88fc67028ae3db0c853baa36269d398d5f45b6982f95549ff5def78c935cd" dependencies = [ "serde", ] @@ -4104,7 +4104,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "windows-targets 0.52.6", + "windows-targets 0.48.5", ] [[package]] @@ -4339,7 +4339,7 @@ dependencies = [ "smallvec", "thiserror 1.0.68", "tracing", - "uint 0.9.5", + "uint", "void", ] @@ -4406,6 +4406,7 @@ dependencies = [ "serde", "tokio", "tracing", + "tracing-subscriber 0.3.19", ] [[package]] @@ -4487,7 +4488,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -4745,7 +4746,7 @@ dependencies = [ "proc-macro-error", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5133,7 +5134,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5189,7 +5190,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5255,7 +5256,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5355,7 +5356,7 @@ dependencies = [ "pest_meta", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5386,7 +5387,7 @@ checksum = "3c0f5fad0874fc7abcd4d750e76917eaebbecaa2c20bde22e1dbeeba8beb758c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5524,19 +5525,19 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "64d1ec885c64d0457d564db4ec299b2dae3f9c02808b8ad9c3a089c591b18033" dependencies = [ "proc-macro2", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "primitive-types" -version = "0.13.1" +version = "0.12.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" +checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", "impl-codec", "impl-serde", - "uint 0.10.0", + "uint", ] [[package]] @@ -5621,7 +5622,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5644,7 +5645,7 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -5770,7 +5771,7 @@ dependencies = [ "once_cell", "socket2 0.5.7", "tracing", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -5887,7 +5888,7 @@ checksum = "52c4f3084aa3bc7dfbba4eff4fab2a54db4324965d8872ab933565e6fbd83bc6" dependencies = [ 
"pem", "ring 0.16.20", - "time 0.3.36", + "time 0.3.37", "yasna", ] @@ -5900,7 +5901,7 @@ dependencies = [ "pem", "ring 0.17.8", "rustls-pki-types", - "time 0.3.36", + "time 0.3.37", "x509-parser", "yasna", ] @@ -6443,13 +6444,13 @@ dependencies = [ [[package]] name = "serde-inline-default" -version = "0.2.2" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3acbd21cb24261fc36f595b38d3b34d0ff4e31a6b42edd6a43387d27c5787c8" +checksum = "59fb1bedd774187d304179493b0d3c41fbe97b04b14305363f68d2bdf5e47cb9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6469,7 +6470,7 @@ checksum = "ad1e866f866923f252f05c889987993144fb74e722403468a4ebd70c3cd756c0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6540,7 +6541,7 @@ dependencies = [ "serde_derive", "serde_json", "serde_with_macros", - "time 0.3.36", + "time 0.3.37", ] [[package]] @@ -6552,7 +6553,7 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6755,7 +6756,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6859,7 +6860,7 @@ dependencies = [ "smallvec", "sqlformat", "thiserror 1.0.68", - "time 0.3.36", + "time 0.3.37", "tokio", "tokio-stream", "tracing", @@ -6876,7 +6877,7 @@ dependencies = [ "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -6899,7 +6900,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.89", + "syn 2.0.90", "tempfile", "tokio", "url", @@ -6943,7 +6944,7 @@ dependencies = [ "sqlx-core", "stringprep", "thiserror 1.0.68", - "time 0.3.36", + "time 0.3.37", "tracing", "whoami", ] @@ -6982,7 +6983,7 @@ dependencies = [ "sqlx-core", "stringprep", "thiserror 1.0.68", - "time 0.3.36", + "time 0.3.37", "tracing", "whoami", ] @@ -7006,7 +7007,7 @@ dependencies = [ "serde", "serde_urlencoded", "sqlx-core", - "time 0.3.36", + "time 0.3.37", "tracing", "url", ] @@ -7114,7 +7115,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -7233,9 +7234,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.89" +version = "2.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d46482f1c1c87acd84dea20c1bf5ebff4c757009ed6bf19cfd36fb10e92c4e" +checksum = "919d3b74a5dd0ccd15aeb8f93e7006bd9e14c295087c9896a110f490752bcf31" dependencies = [ "proc-macro2", "quote", @@ -7265,7 +7266,7 @@ checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -7366,11 +7367,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c006c85c7651b3cf2ada4584faa36773bd07bac24acfb39f3c431b36d7e667aa" +checksum = "2f49a1853cf82743e3b7950f77e0f4d622ca36cf4317cba00c767838bac8d490" dependencies = [ - "thiserror-impl 2.0.3", + "thiserror-impl 2.0.4", ] [[package]] @@ -7381,18 +7382,18 @@ checksum = "a7c61ec9a6f64d2793d8a45faba21efbe3ced62a886d44c36a009b2b519b4c7e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "thiserror-impl" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f077553d607adc1caf65430528a576c757a71ed73944b66ebb58ef2bbd243568" +checksum = 
"8381894bb3efe0c4acac3ded651301ceee58a15d47c2e34885ed1908ad667061" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -7485,7 +7486,7 @@ dependencies = [ "tracing-distributed", "tracing-futures", "tracing-log", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", "url", "vbs", ] @@ -7525,9 +7526,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.36" +version = "0.3.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfd88e563464686c916c7e46e623e520ddc6d79fa6641390f2e3fa86e83e885" +checksum = "35e7868883861bd0e56d9ac6efcaaca0d6d5d82a2a7ec8209ff492c07cf37b21" dependencies = [ "deranged", "itoa", @@ -7535,7 +7536,7 @@ dependencies = [ "powerfmt", "serde", "time-core", - "time-macros 0.2.18", + "time-macros 0.2.19", ] [[package]] @@ -7556,9 +7557,9 @@ dependencies = [ [[package]] name = "time-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f252a68540fde3a3877aeea552b832b40ab9a69e318efd078774a01ddee1ccf" +checksum = "2834e6017e3e5e4b9834939793b282bc03b37a3336245fa820e35e233e2a85de" dependencies = [ "num-conv", "time-core", @@ -7613,9 +7614,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.41.1" +version = "1.42.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22cfb5bee7a6a52939ca9224d6ac897bb669134078daa8735560897f69de4d33" +checksum = "5cec9b21b0450273377fc97bd4c33a8acffc8c996c987a7c5b319a0083707551" dependencies = [ "backtrace", "bytes", @@ -7647,7 +7648,7 @@ checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -7801,9 +7802,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ "log", "pin-project-lite 0.2.15", @@ -7813,20 +7814,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" dependencies = [ "once_cell", "valuable", @@ -7841,7 +7842,7 @@ dependencies = [ "itertools 0.9.0", "tracing", "tracing-core", - "tracing-subscriber 0.3.18", + "tracing-subscriber 0.3.19", ] [[package]] @@ -7867,9 +7868,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.3" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" +checksum = "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" dependencies = [ "serde", 
"tracing-core", @@ -7886,9 +7887,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.18" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" dependencies = [ "matchers", "nu-ansi-term", @@ -7962,18 +7963,6 @@ dependencies = [ "static_assertions", ] -[[package]] -name = "uint" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" -dependencies = [ - "byteorder", - "crunchy", - "hex", - "static_assertions", -] - [[package]] name = "unicase" version = "2.8.0" @@ -8293,7 +8282,7 @@ dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-shared", ] @@ -8327,7 +8316,7 @@ checksum = "26c6ab57572f7a24a4985830b120de1594465e5d500f24afe89e16b4e833ef68" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -8685,7 +8674,7 @@ dependencies = [ "ring 0.17.8", "rusticata-macros", "thiserror 1.0.68", - "time 0.3.36", + "time 0.3.37", ] [[package]] @@ -8720,7 +8709,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e17bb3549cc1321ae1296b9cdc2698e2b6cb1992adfa19a8c72e5b7a738f44cd" dependencies = [ - "time 0.3.36", + "time 0.3.37", ] [[package]] @@ -8743,7 +8732,7 @@ checksum = "28cc31741b18cb6f1d5ff12f5b7523e3d6eb0852bbbad19d73905511d9849b95" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -8765,7 +8754,7 @@ checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -8785,7 +8774,7 @@ checksum = "0ea7b4a3637ea8669cedf0f1fd5c286a17f3de97b8dd5a70a6c167a1730e63a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", "synstructure", ] @@ -8806,7 +8795,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] [[package]] @@ -8828,5 +8817,5 @@ checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.89", + "syn 2.0.90", ] diff --git a/Cargo.toml b/Cargo.toml index 563844630e..4ebda6b06d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -52,7 +52,7 @@ derive_more = { version = "1.0" } digest = "0.10" either = "1.13" espresso-systems-common = { git = "https://github.com/espressosystems/espresso-systems-common", tag = "0.4.1" } -primitive-types = { version = "0.13.1", default-features = false, features = [ +primitive-types = { version = "0.12.2", default-features = false, features = [ "serde", ] } futures = { version = "0.3", default-features = false } @@ -74,7 +74,7 @@ rand_chacha = { version = "0.3", default-features = false } serde = { version = "1", features = ["derive"] } serde-inline-default = "0.2" serde_bytes = { version = "0.11" } -serde_json = { version = "1.0" } +serde_json = { version = "1" } sha2 = "0.10" thiserror = "2" surf-disco = "0.9" diff --git a/README.md b/README.md index c641255b0f..dc1b35ed97 100644 --- a/README.md +++ b/README.md @@ -97,7 +97,7 @@ RUST_LOG=$ERROR_LOG_LEVEL RUST_LOG_FORMAT=$ERROR_LOG_FORMAT just run_test test_s ## Careful -To double check for UB: +To 
double-check for UB: ```bash nix develop .#correctnessShell diff --git a/audits/README.md b/audits/README.md index 49e3c6039c..1df2944f3b 100644 --- a/audits/README.md +++ b/audits/README.md @@ -5,4 +5,4 @@ Internal audits | Scope & Delivery date | Report | |-----------------------------|---------------------------------------------------------------| -| HotShot - July 29, 2024 | [Report](./internal-reviews/EspressoHotShot-2024internal.pdf) | \ No newline at end of file +| HotShot - July 29, 2024 | [Report](./internal-reviews/EspressoHotshot-2024internal.pdf) | diff --git a/crates/builder-api/src/v0_1/builder.rs b/crates/builder-api/src/v0_1/builder.rs index 399093908c..068d9f6e0a 100644 --- a/crates/builder-api/src/v0_1/builder.rs +++ b/crates/builder-api/src/v0_1/builder.rs @@ -262,8 +262,7 @@ where .body_auto::<::Transaction, Ver>(Ver::instance()) .map_err(Error::TxnUnpack)?; let hash = tx.commit(); - state.txn_status(hash).await.map_err(Error::TxnStat)?; - Ok(hash) + state.txn_status(hash).await.map_err(Error::TxnStat) } .boxed() })?; diff --git a/crates/example-types/src/node_types.rs b/crates/example-types/src/node_types.rs index 80b634515c..01160e1cb9 100644 --- a/crates/example-types/src/node_types.rs +++ b/crates/example-types/src/node_types.rs @@ -4,9 +4,16 @@ // You should have received a copy of the MIT License // along with the HotShot repository. If not, see . +use std::marker::PhantomData; + +pub use hotshot::traits::election::helpers::{ + RandomOverlapQuorumFilterConfig, StableQuorumFilterConfig, +}; use hotshot::traits::{ election::{ - randomized_committee::RandomizedCommittee, static_committee::StaticCommittee, + helpers::QuorumFilterConfig, randomized_committee::RandomizedCommittee, + randomized_committee_members::RandomizedCommitteeMembers, + static_committee::StaticCommittee, static_committee_leader_two_views::StaticCommitteeLeaderForTwoViews, }, implementations::{CombinedNetworks, Libp2pNetwork, MemoryNetwork, PushCdnNetwork}, @@ -87,6 +94,40 @@ impl NodeType for TestTypesRandomizedLeader { type BuilderSignatureKey = BuilderKey; } +#[derive( + Copy, + Clone, + Debug, + Default, + Hash, + PartialEq, + Eq, + PartialOrd, + Ord, + serde::Serialize, + serde::Deserialize, +)] +/// filler struct to implement node type and allow us +/// to select our traits +pub struct TestTypesRandomizedCommitteeMembers { + _pd: PhantomData, +} + +impl NodeType for TestTypesRandomizedCommitteeMembers { + type AuctionResult = TestAuctionResult; + type View = ViewNumber; + type Epoch = EpochNumber; + type BlockHeader = TestBlockHeader; + type BlockPayload = TestBlockPayload; + type SignatureKey = BLSPubKey; + type Transaction = TestTransaction; + type ValidatedState = TestValidatedState; + type InstanceState = TestInstanceState; + type Membership = + RandomizedCommitteeMembers, CONFIG>; + type BuilderSignatureKey = BuilderKey; +} + #[derive( Copy, Clone, @@ -133,7 +174,7 @@ pub struct Libp2pImpl; #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct WebImpl; -/// Combined Network implementation (libp2p + web sever) +/// Combined Network implementation (libp2p + web server) #[derive(Clone, Debug, Deserialize, Serialize, Hash, Eq, PartialEq)] pub struct CombinedImpl; @@ -223,7 +264,7 @@ impl Versions for EpochsTestVersions { 0, 0, ]; - type Marketplace = StaticVersion<0, 3>; + type Marketplace = StaticVersion<0, 99>; type Epochs = StaticVersion<0, 4>; } diff --git a/crates/example-types/src/storage_types.rs b/crates/example-types/src/storage_types.rs index 
acedb42007..1a666c737e 100644 --- a/crates/example-types/src/storage_types.rs +++ b/crates/example-types/src/storage_types.rs @@ -14,7 +14,9 @@ use async_lock::RwLock; use async_trait::async_trait; use hotshot_types::{ consensus::CommitmentMap, - data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + }, event::HotShotAction, message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, @@ -38,6 +40,7 @@ type VidShares = HashMap< pub struct TestStorageState { vids: VidShares, das: HashMap>>, + da2s: HashMap>>, proposals: BTreeMap>>, proposals2: BTreeMap>>, high_qc: Option>, @@ -51,6 +54,7 @@ impl Default for TestStorageState { Self { vids: HashMap::new(), das: HashMap::new(), + da2s: HashMap::new(), proposals: BTreeMap::new(), proposals2: BTreeMap::new(), high_qc: None, @@ -142,6 +146,21 @@ impl Storage for TestStorage { .insert(proposal.data.view_number, proposal.clone()); Ok(()) } + async fn append_da2( + &self, + proposal: &Proposal>, + _vid_commit: ::Commit, + ) -> Result<()> { + if self.should_return_err { + bail!("Failed to append VID proposal to storage"); + } + Self::run_delay_settings_from_config(&self.delay_config).await; + let mut inner = self.inner.write().await; + inner + .da2s + .insert(proposal.data.view_number, proposal.clone()); + Ok(()) + } async fn append_proposal( &self, proposal: &Proposal>, diff --git a/crates/examples/push-cdn/README.md b/crates/examples/push-cdn/README.md index c460beb89a..a20e46f4ce 100644 --- a/crates/examples/push-cdn/README.md +++ b/crates/examples/push-cdn/README.md @@ -51,7 +51,7 @@ sleep 1m just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` -Where ones using `example_gpuvid_leader` could be the leader and should be running on a nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. +Where ones using `example_gpuvid_leader` could be the leader and should be running on an nvidia GPU, and other validators using `example_fixed_leader` will never be a leader. In practice, these url should be changed to the corresponding ip and port. If you don't have a gpu but want to test out fixed leader, you can run: @@ -65,4 +65,4 @@ sleep 1m just example_fixed_leader multi-validator-push-cdn -- 9 http://127.0.0.1:4444 ``` -Remember, you have to run leaders first, then other validators, so that leaders will have lower index. \ No newline at end of file +Remember, you have to run leaders first, then other validators, so that leaders will have lower index. diff --git a/crates/hotshot-stake-table/src/utils.rs b/crates/hotshot-stake-table/src/utils.rs index b295cdeb28..07a3d261b3 100644 --- a/crates/hotshot-stake-table/src/utils.rs +++ b/crates/hotshot-stake-table/src/utils.rs @@ -21,6 +21,6 @@ pub trait ToFields { /// convert a U256 to a field element. 
pub(crate) fn u256_to_field(v: &U256) -> F { let mut bytes = vec![0u8; 32]; - v.write_as_little_endian(&mut bytes); + v.to_little_endian(&mut bytes); F::from_le_bytes_mod_order(&bytes) } diff --git a/crates/hotshot/src/tasks/task_state.rs b/crates/hotshot/src/tasks/task_state.rs index 3c4e81585b..f97905b6a4 100644 --- a/crates/hotshot/src/tasks/task_state.rs +++ b/crates/hotshot/src/tasks/task_state.rs @@ -12,10 +12,17 @@ use std::{ use async_trait::async_trait; use chrono::Utc; use hotshot_task_impls::{ - builder::BuilderClient, consensus::ConsensusTaskState, da::DaTaskState, - quorum_proposal::QuorumProposalTaskState, quorum_proposal_recv::QuorumProposalRecvTaskState, - quorum_vote::QuorumVoteTaskState, request::NetworkRequestState, rewind::RewindTaskState, - transactions::TransactionTaskState, upgrade::UpgradeTaskState, vid::VidTaskState, + builder::BuilderClient, + consensus::ConsensusTaskState, + da::DaTaskState, + quorum_proposal::QuorumProposalTaskState, + quorum_proposal_recv::QuorumProposalRecvTaskState, + quorum_vote::{drb_computations::DrbComputations, QuorumVoteTaskState}, + request::NetworkRequestState, + rewind::RewindTaskState, + transactions::TransactionTaskState, + upgrade::UpgradeTaskState, + vid::VidTaskState, view_sync::ViewSyncTaskState, }; use hotshot_types::{ @@ -222,6 +229,9 @@ impl, V: Versions> CreateTaskState async fn create_from(handle: &SystemContextHandle) -> Self { let consensus = handle.hotshot.consensus(); + // Clone the consensus metrics + let consensus_metrics = Arc::clone(&consensus.read().await.metrics); + Self { public_key: handle.public_key().clone(), private_key: handle.private_key().clone(), @@ -231,11 +241,13 @@ impl, V: Versions> CreateTaskState vote_dependencies: BTreeMap::new(), network: Arc::clone(&handle.hotshot.network), membership: (*handle.hotshot.memberships).clone().into(), + drb_computations: DrbComputations::new(), output_event_stream: handle.hotshot.external_event_stream.0.clone(), id: handle.hotshot.id, storage: Arc::clone(&handle.storage), upgrade_lock: handle.hotshot.upgrade_lock.clone(), epoch_height: handle.hotshot.config.epoch_height, + consensus_metrics, } } } diff --git a/crates/hotshot/src/traits/election/helpers.rs b/crates/hotshot/src/traits/election/helpers.rs new file mode 100644 index 0000000000..2a2c7fe172 --- /dev/null +++ b/crates/hotshot/src/traits/election/helpers.rs @@ -0,0 +1,442 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{collections::BTreeSet, hash::Hash}; + +use rand::{rngs::StdRng, Rng, SeedableRng}; + +/// Helper which allows producing random numbers within a range and preventing duplicates +/// If consumed as a regular iterator, will return a randomly ordered permutation of all +/// values from 0..max +struct NonRepeatValueIterator { + /// Random number generator to use + rng: StdRng, + + /// Values which have already been emitted, to avoid duplicates + values: BTreeSet, + + /// Maximum value, open-ended. 
Numbers returned will be 0..max + max: u64, +} + +impl NonRepeatValueIterator { + /// Create a new NonRepeatValueIterator + pub fn new(rng: StdRng, max: u64) -> Self { + Self { + rng, + values: BTreeSet::new(), + max, + } + } +} + +impl Iterator for NonRepeatValueIterator { + type Item = u64; + + fn next(&mut self) -> Option<Self::Item> { + if self.values.len() as u64 >= self.max { + return None; + } + + loop { + let v = self.rng.gen_range(0..self.max); + if !self.values.contains(&v) { + self.values.insert(v); + return Some(v); + } + } + } +} + +/// Create a single u64 seed by merging two u64s. Done this way to allow easy seeding of the number generator +/// from both a stable SEED as well as a moving value ROUND (typically, epoch). Shift left by 8 to avoid +/// scenarios where someone manually stepping seeds would pass over the same space of random numbers across +/// sequential rounds. Doesn't have to be 8, but has to be large enough that it is unlikely that a given +/// test run will collide; using 8 means that 256 rounds (epochs) would have to happen inside of a test before +/// the test starts repeating values from SEED+1. +fn make_seed(seed: u64, round: u64) -> u64 { + seed.wrapping_add(round.wrapping_shl(8)) +} + +/// Create a pair of PRNGs for the given SEED and ROUND. Prev_rng is the PRNG for the previous ROUND, used to +/// deterministically replay random numbers generated for the previous ROUND. +fn make_rngs(seed: u64, round: u64) -> (StdRng, StdRng) { + let prev_rng = SeedableRng::seed_from_u64(make_seed(seed, round.wrapping_sub(1))); + let this_rng = SeedableRng::seed_from_u64(make_seed(seed, round)); + + (prev_rng, this_rng) +} + +/// Iterator which returns odd/even values for a given COUNT of nodes. For OVERLAP=0, this will return +/// [0, 2, 4, 6, ...] for an even round, and [1, 3, 5, 7, ...] for an odd round. Setting OVERLAP>0 will +/// randomly introduce OVERLAP elements from the previous round, so an even round with OVERLAP=2 will contain +/// something like [1, 7, 2, 4, 0, ...]. Note that the total number of nodes will always be COUNT/2, so +/// for OVERLAP>0 a random number of nodes which would have been in the round for OVERLAP=0 will be dropped. +/// Ordering of nodes is random. Output is deterministic when prev_rng and this_rng are provided by make_rngs +/// using the same values for SEED and ROUND. +pub struct StableQuorumIterator { + /// PRNG from the previous round + prev_rng: NonRepeatValueIterator, + + /// PRNG for the current round + this_rng: NonRepeatValueIterator, + + /// Current ROUND + round: u64, + + /// Count of nodes in the source quorum being filtered against + count: u64, + + /// OVERLAP of nodes to be carried over from the previous round + overlap: u64, + + /// The next call to next() will emit the value with this index. Starts at 0 and is incremented for each + /// call to next() + index: u64, +} + +/// Determines how many possible values can be made for the given odd/even +/// E.g.
if count is 5, then possible values would be [0, 1, 2, 3, 4] +/// if odd = true, slots = 2 (1 or 3), else slots = 3 (0, 2, 4) +fn calc_num_slots(count: u64, odd: bool) -> u64 { + (count / 2) + if odd { 0 } else { count % 2 } +} + +impl StableQuorumIterator { + #[must_use] + /// Create a new StableQuorumIterator + /// + /// # Panics + /// + /// panics if overlap is greater than half of count + pub fn new(seed: u64, round: u64, count: u64, overlap: u64) -> Self { + assert!( + count / 2 > overlap, + "Overlap cannot be greater than the entire set size" + ); + + let (prev_rng, this_rng) = make_rngs(seed, round); + + Self { + prev_rng: NonRepeatValueIterator::new(prev_rng, calc_num_slots(count, round % 2 == 0)), + this_rng: NonRepeatValueIterator::new(this_rng, calc_num_slots(count, round % 2 == 1)), + round, + count, + overlap, + index: 0, + } + } +} + +impl Iterator for StableQuorumIterator { + type Item = u64; + + fn next(&mut self) -> Option { + if self.index >= (self.count / 2) { + // Always return exactly half of the possible values. If we have OVERLAP>0 then + // we need to return (COUNT/2)-OVERLAP of the current set, even if there are additional + // even (or odd) numbers that we can return. + None + } else if self.index < self.overlap { + // Generate enough values for the previous round. If the current round is odd, then + // we want to pick even values that were selected from the previous round to create OVERLAP + // even values. + let v = self.prev_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + (1 - self.round % 2)) + } else { + // Generate new values. If our current round is odd, we'll be creating (COUNT/2)-OVERLAP + // odd values here. + let v = self.this_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + self.round % 2) + } + } +} + +#[must_use] +/// Helper function to convert the arguments to a StableQuorumIterator into an ordered set of values. +/// +/// # Panics +/// +/// panics if the arguments are invalid for StableQuorumIterator::new +pub fn stable_quorum_filter(seed: u64, round: u64, count: usize, overlap: u64) -> BTreeSet { + StableQuorumIterator::new(seed, round, count as u64, overlap) + // We should never have more than u32_max members in a test + .map(|x| usize::try_from(x).unwrap()) + .collect() +} + +/// Constructs a quorum with a random number of members and overlaps. Functions similar to StableQuorumIterator, +/// except that the number of MEMBERS and OVERLAP are also (deterministically) random, to allow additional variance +/// in testing. +pub struct RandomOverlapQuorumIterator { + /// PRNG from the previous round + prev_rng: NonRepeatValueIterator, + + /// PRNG for the current round + this_rng: NonRepeatValueIterator, + + /// Current ROUND + round: u64, + + /// Number of members to emit for the current round + members: u64, + + /// OVERLAP of nodes to be carried over from the previous round + overlap: u64, + + /// The next call to next() will emit the value with this index. 
Starts at 0 and is incremented for each + /// call to next() + index: u64, +} + +impl RandomOverlapQuorumIterator { + #[must_use] + /// Create a new RandomOverlapQuorumIterator + /// + /// # Panics + /// + /// panics if overlap and members can produce invalid results or if ranges are invalid + pub fn new( + seed: u64, + round: u64, + count: u64, + members_min: u64, + members_max: u64, + overlap_min: u64, + overlap_max: u64, + ) -> Self { + assert!( + members_min <= members_max, + "Members_min cannot be greater than members_max" + ); + assert!( + overlap_min <= overlap_max, + "Overlap_min cannot be greater than overlap_max" + ); + assert!( + overlap_max < members_min, + "Overlap_max must be less than members_min" + ); + assert!( + count / 2 > overlap_max, + "Overlap cannot be greater than the entire set size" + ); + + let (mut prev_rng, mut this_rng) = make_rngs(seed, round); + + // Consume two values from prev_rng to advance it to the same state it was at the beginning of the previous round + let _prev_members = prev_rng.gen_range(members_min..=members_max); + let _prev_overlap = prev_rng.gen_range(overlap_min..=overlap_max); + let this_members = this_rng.gen_range(members_min..=members_max); + let this_overlap = this_rng.gen_range(overlap_min..=overlap_max); + + Self { + prev_rng: NonRepeatValueIterator::new(prev_rng, calc_num_slots(count, round % 2 == 0)), + this_rng: NonRepeatValueIterator::new(this_rng, calc_num_slots(count, round % 2 == 1)), + round, + members: this_members, + overlap: this_overlap, + index: 0, + } + } +} + +impl Iterator for RandomOverlapQuorumIterator { + type Item = u64; + + fn next(&mut self) -> Option<Self::Item> { + if self.index >= self.members { + None + } else if self.index < self.overlap { + // Generate enough values for the previous round + let v = self.prev_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + (1 - self.round % 2)) + } else { + // Generate new values + let v = self.this_rng.next().unwrap(); + self.index += 1; + Some(v * 2 + self.round % 2) + } + } +} + +#[must_use] +/// Helper function to convert the arguments to a RandomOverlapQuorumIterator into an ordered set of values. +/// +/// # Panics +/// +/// panics if the arguments are invalid for RandomOverlapQuorumIterator::new +pub fn random_overlap_quorum_filter( + seed: u64, + round: u64, + count: usize, + members_min: u64, + members_max: u64, + overlap_min: u64, + overlap_max: u64, +) -> BTreeSet<usize> { + RandomOverlapQuorumIterator::new( + seed, + round, + count as u64, + members_min, + members_max, + overlap_min, + overlap_max, + ) + // We should never have more than u32_max members in a test + .map(|x| usize::try_from(x).unwrap()) + .collect() +} + +/// Trait wrapping a config for quorum filters.
This allows selection between either the StableQuorumIterator or the +/// RandomOverlapQuorumIterator functionality from above +pub trait QuorumFilterConfig: + Copy + + Clone + + std::fmt::Debug + + Default + + Send + + Sync + + Ord + + PartialOrd + + Eq + + PartialEq + + Hash + + 'static +{ + /// Called to run the filter and return a set of indices + fn execute(epoch: u64, count: usize) -> BTreeSet<usize>; +} + +#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash, Ord, PartialOrd)] +/// Provides parameters to use the StableQuorumIterator +pub struct StableQuorumFilterConfig<const SEED: u64, const OVERLAP: u64> {} + +impl<const SEED: u64, const OVERLAP: u64> QuorumFilterConfig + for StableQuorumFilterConfig<SEED, OVERLAP> +{ + fn execute(epoch: u64, count: usize) -> BTreeSet<usize> { + stable_quorum_filter(SEED, epoch, count, OVERLAP) + } +} + +#[derive(Debug, Copy, Clone, Default, Eq, PartialEq, Hash, Ord, PartialOrd)] +/// Provides parameters to use the RandomOverlapQuorumIterator +pub struct RandomOverlapQuorumFilterConfig< + const SEED: u64, + const MEMBERS_MIN: u64, + const MEMBERS_MAX: u64, + const OVERLAP_MIN: u64, + const OVERLAP_MAX: u64, +> {} + +impl< + const SEED: u64, + const MEMBERS_MIN: u64, + const MEMBERS_MAX: u64, + const OVERLAP_MIN: u64, + const OVERLAP_MAX: u64, + > QuorumFilterConfig + for RandomOverlapQuorumFilterConfig<SEED, MEMBERS_MIN, MEMBERS_MAX, OVERLAP_MIN, OVERLAP_MAX> +{ + fn execute(epoch: u64, count: usize) -> BTreeSet<usize> { + random_overlap_quorum_filter( + SEED, + epoch, + count, + MEMBERS_MIN, + MEMBERS_MAX, + OVERLAP_MIN, + OVERLAP_MAX, + ) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_stable() { + for _ in 0..100 { + let seed = rand::random::<u64>(); + let prev_set: Vec<u64> = StableQuorumIterator::new(seed, 1, 10, 2).collect(); + let this_set: Vec<u64> = StableQuorumIterator::new(seed, 2, 10, 2).collect(); + + // The first two elements from prev_set are from its previous round. But its 2nd and 3rd elements + // are new, and should be carried over to become the first two elements from this_set. + assert_eq!( + prev_set[2..4], + this_set[0..2], + "prev_set={prev_set:?}, this_set={this_set:?}" + ); + } + } + + #[test] + fn test_random_overlap() { + for _ in 0..100 { + let seed = rand::random::<u64>(); + let prev_set: Vec<u64> = + RandomOverlapQuorumIterator::new(seed, 1, 20, 5, 10, 2, 3).collect(); + let this_set: Vec<u64> = + RandomOverlapQuorumIterator::new(seed, 2, 20, 5, 10, 2, 3).collect(); + + // Similar to the overlap before, but there are 4 possible cases: the previous set might have had + // either 2 or 3 overlaps, meaning we should start with index 2 or 3, and the overlap size might + // be either 2 or 3. We'll just check for 2 overlaps, meaning we have two possible overlap cases + // to verify.
+ let matched = (prev_set[2..4] == this_set[0..2]) || (prev_set[3..5] == this_set[0..2]); + assert!(matched, "prev_set={prev_set:?}, this_set={this_set:?}"); + } + } + + #[test] + fn test_odd_even() { + for _ in 0..100 { + let seed = rand::random::(); + + let odd_set: Vec = StableQuorumIterator::new(seed, 1, 10, 2).collect(); + let even_set: Vec = StableQuorumIterator::new(seed, 2, 10, 2).collect(); + + assert!( + odd_set[2] % 2 == 1, + "odd set non-overlap value should be odd (stable)" + ); + assert!( + even_set[2] % 2 == 0, + "even set non-overlap value should be even (stable)" + ); + + let odd_set: Vec = + RandomOverlapQuorumIterator::new(seed, 1, 20, 5, 10, 2, 3).collect(); + let even_set: Vec = + RandomOverlapQuorumIterator::new(seed, 2, 20, 5, 10, 2, 3).collect(); + + assert!( + odd_set[3] % 2 == 1, + "odd set non-overlap value should be odd (random overlap)" + ); + assert!( + even_set[3] % 2 == 0, + "even set non-overlap value should be even (random overlap)" + ); + } + } + + #[test] + fn calc_num_slots_test() { + assert_eq!(calc_num_slots(5, true), 2); + assert_eq!(calc_num_slots(5, false), 3); + + assert_eq!(calc_num_slots(6, true), 3); + assert_eq!(calc_num_slots(6, false), 3); + } +} diff --git a/crates/hotshot/src/traits/election.rs b/crates/hotshot/src/traits/election/mod.rs similarity index 79% rename from crates/hotshot/src/traits/election.rs rename to crates/hotshot/src/traits/election/mod.rs index 427ed12629..914b9bbb33 100644 --- a/crates/hotshot/src/traits/election.rs +++ b/crates/hotshot/src/traits/election/mod.rs @@ -6,11 +6,17 @@ //! elections used for consensus -/// Dynamic leader election with epochs. -pub mod dynamic; /// leader completely randomized every view pub mod randomized_committee; + +/// quorum randomized every view, with configurable overlap +pub mod randomized_committee_members; + /// static (round robin) committee election pub mod static_committee; + /// static (round robin leader for 2 consecutive views) committee election pub mod static_committee_leader_two_views; + +/// general helpers +pub mod helpers; diff --git a/crates/hotshot/src/traits/election/randomized_committee.rs b/crates/hotshot/src/traits/election/randomized_committee.rs index 2b721a66e0..4046123553 100644 --- a/crates/hotshot/src/traits/election/randomized_committee.rs +++ b/crates/hotshot/src/traits/election/randomized_committee.rs @@ -226,22 +226,22 @@ impl Membership for RandomizedCommittee { self.da_stake_table.len() } /// Get the voting success threshold for the committee - fn success_threshold(&self) -> NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(max( (self.stake_table.len() as u64 * 9) / 10, ((self.stake_table.len() as u64 * 2) / 3) + 1, diff --git a/crates/hotshot/src/traits/election/randomized_committee_members.rs 
b/crates/hotshot/src/traits/election/randomized_committee_members.rs new file mode 100644 index 0000000000..5c85ad9c07 --- /dev/null +++ b/crates/hotshot/src/traits/election/randomized_committee_members.rs @@ -0,0 +1,353 @@ +// Copyright (c) 2021-2024 Espresso Systems (espressosys.com) +// This file is part of the HotShot repository. + +// You should have received a copy of the MIT License +// along with the HotShot repository. If not, see . + +use std::{ + cmp::max, + collections::{BTreeMap, BTreeSet}, + marker::PhantomData, + num::NonZeroU64, +}; + +use hotshot_types::{ + traits::{ + election::Membership, + node_implementation::{ConsensusTime, NodeType}, + signature_key::{SignatureKey, StakeTableEntryType}, + }, + PeerConfig, +}; +use primitive_types::U256; +use rand::{rngs::StdRng, Rng}; +use utils::anytrace::Result; + +use crate::traits::election::helpers::QuorumFilterConfig; + +#[derive(Clone, Debug, Eq, PartialEq, Hash)] +/// The static committee election +pub struct RandomizedCommitteeMembers { + /// The nodes eligible for leadership. + /// NOTE: This is currently a hack because the DA leader needs to be the quorum + /// leader but without voting rights. + eligible_leaders: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake + stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the da committee and their stake + da_stake_table: Vec<::StakeTableEntry>, + + /// The nodes on the committee and their stake, indexed by public key + indexed_stake_table: + BTreeMap::StakeTableEntry>, + + /// The nodes on the da committee and their stake, indexed by public key + indexed_da_stake_table: + BTreeMap::StakeTableEntry>, + + /// Phantom + _pd: PhantomData, +} + +impl RandomizedCommitteeMembers { + /// Creates a set of indices into the stake_table which reference the nodes selected for this epoch's committee + fn make_quorum_filter(&self, epoch: ::Epoch) -> BTreeSet { + CONFIG::execute(epoch.u64(), self.stake_table.len()) + } + + /// Creates a set of indices into the da_stake_table which reference the nodes selected for this epoch's da committee + fn make_da_quorum_filter(&self, epoch: ::Epoch) -> BTreeSet { + CONFIG::execute(epoch.u64(), self.da_stake_table.len()) + } +} + +impl Membership + for RandomizedCommitteeMembers +{ + type Error = utils::anytrace::Error; + + /// Create a new election + fn new( + committee_members: Vec::SignatureKey>>, + da_members: Vec::SignatureKey>>, + ) -> Self { + // For each eligible leader, get the stake table entry + let eligible_leaders: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each member, get the stake table entry + let members: Vec<::StakeTableEntry> = + committee_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // For each da member, get the stake table entry + let da_members: Vec<::StakeTableEntry> = da_members + .iter() + .map(|member| member.stake_table_entry.clone()) + .filter(|entry| entry.stake() > U256::zero()) + .collect(); + + // Index the stake table by public key + let indexed_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = members + .iter() + .map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + // Index the stake table by public key + let indexed_da_stake_table: BTreeMap< + TYPES::SignatureKey, + ::StakeTableEntry, + > = da_members + .iter() + 
.map(|entry| (TYPES::SignatureKey::public_key(entry), entry.clone())) + .collect(); + + Self { + eligible_leaders, + stake_table: members, + da_stake_table: da_members, + indexed_stake_table, + indexed_da_stake_table, + _pd: PhantomData, + } + } + + /// Get the stake table for the current view + fn stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let filter = self.make_quorum_filter(epoch); + //self.stake_table.clone()s + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } + + /// Get the da stake table for the current view + fn da_stake_table( + &self, + epoch: ::Epoch, + ) -> Vec<<::SignatureKey as SignatureKey>::StakeTableEntry> { + let filter = self.make_da_quorum_filter(epoch); + //self.stake_table.clone()s + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect() + } + + /// Get all members of the committee for the current view + fn committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + let filter = self.make_quorum_filter(epoch); + self.stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } + + /// Get all members of the committee for the current view + fn da_committee_members( + &self, + _view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + let filter = self.make_da_quorum_filter(epoch); + self.da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect() + } + + /// Get all eligible leaders of the committee for the current view + fn committee_leaders( + &self, + view_number: ::View, + epoch: ::Epoch, + ) -> BTreeSet<::SignatureKey> { + self.committee_members(view_number, epoch) + } + + /// Get the stake table entry for a public key + fn stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } + } + + /// Get the da stake table entry for a public key + fn da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> Option<::StakeTableEntry> { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + // Only return the stake if it is above zero + self.indexed_da_stake_table.get(pub_key).cloned() + } else { + // Skip members which aren't included based on the quorum filter + None + } + } + + /// Check if a node has stake in the committee + fn has_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + let filter = self.make_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| 
TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + self.indexed_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } + + /// Check if a node has stake in the committee + fn has_da_stake( + &self, + pub_key: &::SignatureKey, + epoch: ::Epoch, + ) -> bool { + let filter = self.make_da_quorum_filter(epoch); + let actual_members: BTreeSet<_> = self + .da_stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| TYPES::SignatureKey::public_key(v)) + .collect(); + + if actual_members.contains(pub_key) { + self.indexed_da_stake_table + .get(pub_key) + .is_some_and(|x| x.stake() > U256::zero()) + } else { + // Skip members which aren't included based on the quorum filter + false + } + } + + /// Index the vector of public keys with the current view number + fn lookup_leader( + &self, + view_number: TYPES::View, + epoch: ::Epoch, + ) -> Result { + let filter = self.make_quorum_filter(epoch); + let leader_vec: Vec<_> = self + .stake_table + .iter() + .enumerate() + .filter(|(idx, _)| filter.contains(idx)) + .map(|(_, v)| v.clone()) + .collect(); + + let mut rng: StdRng = rand::SeedableRng::seed_from_u64(*view_number); + + let randomized_view_number: u64 = rng.gen_range(0..=u64::MAX); + #[allow(clippy::cast_possible_truncation)] + let index = randomized_view_number as usize % leader_vec.len(); + + let res = leader_vec[index].clone(); + + Ok(TYPES::SignatureKey::public_key(&res)) + } + + /// Get the total number of nodes in the committee + fn total_nodes(&self, epoch: ::Epoch) -> usize { + self.make_quorum_filter(epoch).len() + } + + /// Get the total number of nodes in the committee + fn da_total_nodes(&self, epoch: ::Epoch) -> usize { + self.make_da_quorum_filter(epoch).len() + } + + /// Get the voting success threshold for the committee + fn success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() + } + + /// Get the voting success threshold for the committee + fn da_success_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.da_total_nodes(epoch); + NonZeroU64::new(((len as u64 * 2) / 3) + 1).unwrap() + } + + /// Get the voting failure threshold for the committee + fn failure_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(((len as u64) / 3) + 1).unwrap() + } + + /// Get the voting upgrade threshold for the committee + fn upgrade_threshold(&self, epoch: ::Epoch) -> NonZeroU64 { + let len = self.total_nodes(epoch); + NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() + } +} diff --git a/crates/hotshot/src/traits/election/static_committee.rs b/crates/hotshot/src/traits/election/static_committee.rs index fa904c66cf..d2b62f80b7 100644 --- a/crates/hotshot/src/traits/election/static_committee.rs +++ b/crates/hotshot/src/traits/election/static_committee.rs @@ -215,26 +215,23 @@ impl Membership for StaticCommittee { } /// Get the voting success threshold for the committee - fn success_threshold(&self) -> NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { 
NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { - NonZeroU64::new(max( - (self.stake_table.len() as u64 * 9) / 10, - ((self.stake_table.len() as u64 * 2) / 3) + 1, - )) - .unwrap() + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { + let len = self.stake_table.len(); + NonZeroU64::new(max((len as u64 * 9) / 10, ((len as u64 * 2) / 3) + 1)).unwrap() } } diff --git a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs index 41ed1d046e..8833d06872 100644 --- a/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs +++ b/crates/hotshot/src/traits/election/static_committee_leader_two_views.rs @@ -217,22 +217,22 @@ impl Membership for StaticCommitteeLeaderForTwoViews NonZeroU64 { + fn success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting success threshold for the committee - fn da_success_threshold(&self) -> NonZeroU64 { + fn da_success_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.da_stake_table.len() as u64 * 2) / 3) + 1).unwrap() } /// Get the voting failure threshold for the committee - fn failure_threshold(&self) -> NonZeroU64 { + fn failure_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64) / 3) + 1).unwrap() } /// Get the voting upgrade threshold for the committee - fn upgrade_threshold(&self) -> NonZeroU64 { + fn upgrade_threshold(&self, _epoch: ::Epoch) -> NonZeroU64 { NonZeroU64::new(((self.stake_table.len() as u64 * 9) / 10) + 1).unwrap() } } diff --git a/crates/hotshot/src/traits/networking/memory_network.rs b/crates/hotshot/src/traits/networking/memory_network.rs index d48f6f5f79..5925a85eff 100644 --- a/crates/hotshot/src/traits/networking/memory_network.rs +++ b/crates/hotshot/src/traits/networking/memory_network.rs @@ -88,7 +88,7 @@ struct MemoryNetworkInner { /// This provides an in memory simulation of a networking implementation, allowing nodes running on /// the same machine to mock networking while testing other functionality. /// -/// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` insane of the +/// Under the hood, this simply maintains mpmc channels to every other `MemoryNetwork` instance of the /// same group. 
#[derive(Clone)] pub struct MemoryNetwork { @@ -297,22 +297,53 @@ impl ConnectedNetwork for MemoryNetwork { &self, message: Vec, recipients: Vec, - broadcast_delay: BroadcastDelay, + _broadcast_delay: BroadcastDelay, ) -> Result<(), NetworkError> { - // Iterate over all topics, compare to recipients, and get the `Topic` - let topic = self + trace!(?message, "Broadcasting message to DA"); + for node in self .inner .master_map .subscribed_map + .entry(Topic::Da) + .or_default() .iter() - .find(|v| v.value().iter().all(|(k, _)| recipients.contains(k))) - .map(|v| v.key().clone()) - .ok_or(NetworkError::MessageSendError( - "no topic found for recipients".to_string(), - ))?; - - self.broadcast_message(message, topic, broadcast_delay) - .await + { + if !recipients.contains(&node.0) { + tracing::error!("Skipping node because not in recipient list: {:?}", &node.0); + continue; + } + // TODO delay/drop etc here + let (key, node) = node; + trace!(?key, "Sending message to node"); + if let Some(ref config) = &self.inner.reliability_config { + { + let node2 = node.clone(); + let fut = config.chaos_send_msg( + message.clone(), + Arc::new(move |msg: Vec| { + let node3 = (node2).clone(); + boxed_sync(async move { + let _res = node3.input(msg).await; + // NOTE we're dropping metrics here but this is only for testing + // purposes. I think that should be okay + }) + }), + ); + spawn(fut); + } + } else { + let res = node.input(message.clone()).await; + match res { + Ok(()) => { + trace!(?key, "Delivered message to remote"); + } + Err(e) => { + warn!(?e, ?key, "Error sending broadcast message to node"); + } + } + } + } + Ok(()) } #[instrument(name = "MemoryNetwork::direct_message")] diff --git a/crates/hotshot/src/types/handle.rs b/crates/hotshot/src/types/handle.rs index 7b1fd5a424..9ea46b34d7 100644 --- a/crates/hotshot/src/types/handle.rs +++ b/crates/hotshot/src/types/handle.rs @@ -184,7 +184,6 @@ impl + 'static, V: Versions> .ok_or(anyhow!("Event dependency failed to get event"))?; // Then, if it's `Some`, make sure that the data is correct - if let HotShotEvent::QuorumProposalResponseRecv(quorum_proposal) = hs_event.as_ref() { // Make sure that the quorum_proposal is valid diff --git a/crates/libp2p-networking/Cargo.toml b/crates/libp2p-networking/Cargo.toml index edabb02097..98f1449508 100644 --- a/crates/libp2p-networking/Cargo.toml +++ b/crates/libp2p-networking/Cargo.toml @@ -33,6 +33,7 @@ rand = { workspace = true } serde = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +tracing-subscriber = { workspace = true } [lints] workspace = true diff --git a/crates/libp2p-networking/src/network/behaviours/dht/mod.rs b/crates/libp2p-networking/src/network/behaviours/dht/mod.rs index 3e1bc95673..f785d1fa10 100644 --- a/crates/libp2p-networking/src/network/behaviours/dht/mod.rs +++ b/crates/libp2p-networking/src/network/behaviours/dht/mod.rs @@ -28,7 +28,7 @@ use libp2p::kad::{ store::RecordStore, Behaviour as KademliaBehaviour, BootstrapError, Event as KademliaEvent, }; use libp2p_identity::PeerId; -use store::ValidatedStore; +use store::{file_backed::FileBackedStore, validated::ValidatedStore}; use tokio::{spawn, sync::mpsc::UnboundedSender, time::sleep}; use tracing::{debug, error, warn}; @@ -143,7 +143,7 @@ impl DHTBehaviour { /// print out the routing table to stderr pub fn print_routing_table( &mut self, - kadem: &mut KademliaBehaviour>, + kadem: &mut KademliaBehaviour>>, ) { let mut err = format!("KBUCKETS: PID: {:?}, ", self.peer_id); let v = 
kadem.kbuckets().collect::>(); @@ -179,7 +179,7 @@ impl DHTBehaviour { factor: NonZeroUsize, backoff: ExponentialBackoff, retry_count: u8, - kad: &mut KademliaBehaviour>, + kad: &mut KademliaBehaviour>>, ) { // noop if retry_count == 0 { @@ -247,7 +247,7 @@ impl DHTBehaviour { /// update state based on recv-ed get query fn handle_get_query( &mut self, - store: &mut ValidatedStore, + store: &mut FileBackedStore>, record_results: GetRecordResult, id: QueryId, mut last: bool, @@ -405,7 +405,7 @@ impl DHTBehaviour { pub fn dht_handle_event( &mut self, event: KademliaEvent, - store: &mut ValidatedStore, + store: &mut FileBackedStore>, ) -> Option { match event { KademliaEvent::OutboundQueryProgressed { diff --git a/crates/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs b/crates/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs new file mode 100644 index 0000000000..ba442d5a9b --- /dev/null +++ b/crates/libp2p-networking/src/network/behaviours/dht/store/file_backed.rs @@ -0,0 +1,426 @@ +//! This file contains the `FileBackedStore` struct, which is a wrapper around a `RecordStore` +//! that occasionally saves the DHT to a file on disk. + +use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH}; + +use anyhow::Context; +use delegate::delegate; +use libp2p::kad::store::{RecordStore, Result}; +use serde::{Deserialize, Serialize}; +use tracing::{debug, warn}; + +/// A `RecordStore` wrapper that occasionally saves the DHT to a file on disk. +pub struct FileBackedStore { + /// The underlying store + underlying_store: R, + + /// The path to the file + path: String, + + /// The maximum number of records that can be added to the store before the store is saved to a file + max_record_delta: u64, + + /// The running delta between the records in the file and the records in the underlying store + record_delta: u64, +} + +/// A serializable version of a Libp2p `Record` +#[derive(Serialize, Deserialize)] +pub struct SerializableRecord { + /// The key of the record + pub key: libp2p::kad::RecordKey, + /// The value of the record + pub value: Vec, + /// The (original) publisher of the record. + pub publisher: Option, + /// The record expiration time in seconds since the Unix epoch + /// + /// This is an approximation of the expiration time because we can't + /// serialize an `Instant` directly. + pub expires_unix_secs: Option, +} + +/// Approximate an `Instant` to the number of seconds since the Unix epoch +fn instant_to_unix_seconds(instant: Instant) -> anyhow::Result { + // Get the current instant and system time + let now_instant = Instant::now(); + let now_system = SystemTime::now(); + + // Get the duration of time between the instant and now + if instant > now_instant { + Ok(now_system + .checked_add(instant - now_instant) + .with_context(|| "Overflow when approximating expiration time")? + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? + .as_secs()) + } else { + Ok(now_system + .checked_sub(now_instant - instant) + .with_context(|| "Underflow when approximating expiration time")? + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? + .as_secs()) + } +} + +/// Convert a unix-second timestamp to an `Instant` +fn unix_seconds_to_instant(unix_secs: u64) -> anyhow::Result { + // Get the current instant and unix time + let now_instant = Instant::now(); + let unix_secs_now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .with_context(|| "Failed to get duration since Unix epoch")? 
+ .as_secs(); + + if unix_secs > unix_secs_now { + // If the instant is in the future, add the duration to the current time + now_instant + .checked_add(Duration::from_secs(unix_secs - unix_secs_now)) + .with_context(|| "Overflow when calculating future instant") + } else { + // If the instant is in the past, subtract the duration from the current time + now_instant + .checked_sub(Duration::from_secs(unix_secs_now - unix_secs)) + .with_context(|| "Underflow when calculating past instant") + } +} + +/// Allow conversion from a `libp2p::kad::Record` to a `SerializableRecord` +impl TryFrom for SerializableRecord { + type Error = anyhow::Error; + + fn try_from(record: libp2p::kad::Record) -> anyhow::Result { + Ok(SerializableRecord { + key: record.key, + value: record.value, + publisher: record.publisher, + expires_unix_secs: record.expires.map(instant_to_unix_seconds).transpose()?, + }) + } +} + +/// Allow conversion from a `SerializableRecord` to a `libp2p::kad::Record` +impl TryFrom for libp2p::kad::Record { + type Error = anyhow::Error; + + fn try_from(record: SerializableRecord) -> anyhow::Result { + Ok(libp2p::kad::Record { + key: record.key, + value: record.value, + publisher: record.publisher, + expires: record + .expires_unix_secs + .map(unix_seconds_to_instant) + .transpose()?, + }) + } +} + +impl FileBackedStore { + /// Create a new `FileBackedStore` with the given underlying store and path. + /// + /// `max_record_delta` is the maximum number of records that can be added to the store before + /// the store is saved to a file. + pub fn new(underlying_store: R, path: String, max_record_delta: u64) -> Self { + // Create the new store + let mut store = FileBackedStore { + underlying_store, + path: path.clone(), + max_record_delta, + record_delta: 0, + }; + + // Try to restore the DHT from a file. If it fails, warn and start with an empty store + if let Err(err) = store.restore_from_file(path) { + warn!( + "Failed to restore DHT from file: {:?}. 
Starting with empty store", + err + ); + } + + // Return the new store + store + } + + /// Attempt to save the DHT to the file at the given path + /// + /// # Errors + /// - If we fail to serialize the DHT + /// - If we fail to write the serialized DHT to the file + pub fn save_to_file(&mut self) -> anyhow::Result<()> { + debug!("Saving DHT to file"); + + // Get all records and convert them to their serializable counterparts + let serializable_records: Vec<_> = self + .underlying_store + .records() + .filter_map(|record| { + SerializableRecord::try_from(record.into_owned()) + .map_err(|err| { + warn!("Failed to convert record to serializable record: {:?}", err); + }) + .ok() + }) + .collect(); + + // Serialize the records + let contents = bincode::serialize(&serializable_records) + .with_context(|| "Failed to serialize records")?; + + // Write the contents to the file + std::fs::write(self.path.clone(), contents) + .with_context(|| "Failed to write DHT to file")?; + + debug!("Saved DHT to file"); + + Ok(()) + } + + /// Attempt to restore the DHT to the underlying store from the file at the given path + /// + /// # Errors + /// - If we fail to read the file + /// - If we fail to deserialize the file + pub fn restore_from_file(&mut self, path: String) -> anyhow::Result<()> { + debug!("Restoring DHT from file"); + + // Read the contents of the file as a `HashMap` of `Key` to `Vec` + let contents = std::fs::read(path).with_context(|| "Failed to read DHT file")?; + + // Convert the contents to a `HashMap` of `RecordKey` to `Vec` + let serializable_records: Vec = + bincode::deserialize(&contents).with_context(|| "Failed to parse DHT file")?; + + // Put all records into the new store + for serializable_record in serializable_records { + // Convert the serializable record back to a `libp2p::kad::Record` + match libp2p::kad::Record::try_from(serializable_record) { + Ok(record) => { + // Put the record into the new store + if let Err(err) = self.underlying_store.put(record) { + warn!("Failed to restore record from file: {:?}", err); + } + } + Err(err) => { + warn!("Failed to parse record from file: {:?}", err); + } + }; + } + + debug!("Restored DHT from file"); + + Ok(()) + } +} + +/// Implement the `RecordStore` trait for `FileBackedStore` +impl RecordStore for FileBackedStore { + type ProvidedIter<'a> + = R::ProvidedIter<'a> + where + R: 'a; + type RecordsIter<'a> + = R::RecordsIter<'a> + where + R: 'a; + + // Delegate all `RecordStore` methods except `put` to the inner store + delegate! 
{ + to self.underlying_store { + fn add_provider(&mut self, record: libp2p::kad::ProviderRecord) -> libp2p::kad::store::Result<()>; + fn get(&self, k: &libp2p::kad::RecordKey) -> Option>; + fn provided(&self) -> Self::ProvidedIter<'_>; + fn providers(&self, key: &libp2p::kad::RecordKey) -> Vec; + fn records(&self) -> Self::RecordsIter<'_>; + fn remove_provider(&mut self, k: &libp2p::kad::RecordKey, p: &libp2p::PeerId); + } + } + + /// Overwrite the `put` method to potentially save the record to a file + fn put(&mut self, record: libp2p::kad::Record) -> Result<()> { + // Try to write to the underlying store + let result = self.underlying_store.put(record); + + // If the record was successfully written, update the record delta + if result.is_ok() { + self.record_delta += 1; + + // If the record delta is greater than the maximum record delta, try to save the file + if self.record_delta > self.max_record_delta { + if let Err(e) = self.save_to_file() { + warn!("Failed to save DHT to file: {:?}", e); + } + } + } + + result + } + + /// Overwrite the `remove` method to potentially remove the record from a file + fn remove(&mut self, k: &libp2p::kad::RecordKey) { + // Remove the record from the underlying store + self.underlying_store.remove(k); + + // Update the record delta + self.record_delta += 1; + + // If the record delta is greater than 10, try to save the file + if self.record_delta > 10 { + if let Err(e) = self.save_to_file() { + warn!("Failed to save DHT to file: {:?}", e); + } + } + } +} + +#[cfg(test)] +mod tests { + use libp2p::{ + kad::{store::MemoryStore, RecordKey}, + PeerId, + }; + use tracing_subscriber::EnvFilter; + + use super::*; + + #[test] + fn test_save_and_restore() { + // Try initializing tracing + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // Create a test store + let mut store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // The key is a random 16-byte array + let key = RecordKey::new(&rand::random::<[u8; 16]>().to_vec()); + + // The value is a random 16-byte array + let random_value = rand::random::<[u8; 16]>(); + + // Put a record into the store + store + .put(libp2p::kad::Record::new(key.clone(), random_value.to_vec())) + .expect("Failed to put record into store"); + + // Save the store to a file + store.save_to_file().expect("Failed to save store to file"); + + // Create a new store from the file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has the record + let restored_record = new_store + .get(&key) + .expect("Failed to get record from store"); + + // Check that the restored record has the same value as the original record + assert_eq!(restored_record.value, random_value.to_vec()); + } + + #[test] + fn test_record_delta() { + // Try initializing tracing + let _ = tracing_subscriber::fmt() + .with_env_filter(EnvFilter::from_default_env()) + .try_init(); + + // Create a test store + let mut store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + let mut keys = Vec::new(); + let mut values = Vec::new(); + + // Put 10 records into the store + for _ in 0..10 { + // Create a random key and value + let key = RecordKey::new(&rand::random::<[u8; 16]>().to_vec()); + let value = rand::random::<[u8; 16]>(); + + keys.push(key.clone()); + values.push(value); + + store + 
.put(libp2p::kad::Record::new(key, value.to_vec())) + .expect("Failed to put record into store"); + } + + // Create a new store from the allegedly unsaved file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has none of the records + for key in &keys { + assert!(new_store.get(key).is_none()); + } + + // Store one more record into the new store + store + .put(libp2p::kad::Record::new( + keys[0].clone(), + values[0].to_vec(), + )) + .expect("Failed to put record into store"); + + // Create a new store from the allegedly saved file + let new_store = FileBackedStore::new( + MemoryStore::new(PeerId::random()), + "/tmp/test.dht".to_string(), + 10, + ); + + // Check that the new store has all of the records + for (i, key) in keys.iter().enumerate() { + let restored_record = new_store.get(key).expect("Failed to get record from store"); + assert_eq!(restored_record.value, values[i]); + } + + // Check that the record delta is 0 + assert_eq!(new_store.record_delta, 0); + } + + #[test] + fn test_approximate_instant() { + // Create an expiry time in the future + let expiry_future = Instant::now() + Duration::from_secs(10); + + // Approximate the expiry time + let approximate_expiry = + unix_seconds_to_instant(instant_to_unix_seconds(expiry_future).unwrap()) + .unwrap() + .duration_since(Instant::now()); + + // Make sure it's close to 10 seconds in the future + assert!(approximate_expiry >= Duration::from_secs(9)); + assert!(approximate_expiry <= Duration::from_secs(11)); + + // Create an expiry time in the past + let expiry_past = Instant::now().checked_sub(Duration::from_secs(10)).unwrap(); + + // Approximate the expiry time + let approximate_expiry = + unix_seconds_to_instant(instant_to_unix_seconds(expiry_past).unwrap()).unwrap(); + let time_difference = approximate_expiry.elapsed(); + + // Make sure it's close to 10 seconds in the past + assert!(time_difference >= Duration::from_secs(9)); + assert!(time_difference <= Duration::from_secs(11)); + } +} diff --git a/crates/libp2p-networking/src/network/behaviours/dht/store/mod.rs b/crates/libp2p-networking/src/network/behaviours/dht/store/mod.rs new file mode 100644 index 0000000000..d9f42b8b1c --- /dev/null +++ b/crates/libp2p-networking/src/network/behaviours/dht/store/mod.rs @@ -0,0 +1,2 @@ +pub mod file_backed; +pub mod validated; diff --git a/crates/libp2p-networking/src/network/behaviours/dht/store.rs b/crates/libp2p-networking/src/network/behaviours/dht/store/validated.rs similarity index 98% rename from crates/libp2p-networking/src/network/behaviours/dht/store.rs rename to crates/libp2p-networking/src/network/behaviours/dht/store/validated.rs index cf5c22d61e..4a65f33b21 100644 --- a/crates/libp2p-networking/src/network/behaviours/dht/store.rs +++ b/crates/libp2p-networking/src/network/behaviours/dht/store/validated.rs @@ -10,8 +10,7 @@ use hotshot_types::traits::signature_key::SignatureKey; use libp2p::kad::store::{Error, RecordStore, Result}; use tracing::warn; -use super::record::RecordValue; -use crate::network::behaviours::dht::record::RecordKey; +use crate::network::behaviours::dht::record::{RecordKey, RecordValue}; /// A `RecordStore` wrapper that validates records before storing them. 
pub struct ValidatedStore { diff --git a/crates/libp2p-networking/src/network/def.rs b/crates/libp2p-networking/src/network/def.rs index 3f5189e3ac..a52fdae36c 100644 --- a/crates/libp2p-networking/src/network/def.rs +++ b/crates/libp2p-networking/src/network/def.rs @@ -17,7 +17,10 @@ use libp2p_identity::PeerId; use libp2p_swarm_derive::NetworkBehaviour; use tracing::{debug, error}; -use super::{behaviours::dht::store::ValidatedStore, cbor, NetworkEventInternal}; +use super::{ + behaviours::dht::store::{file_backed::FileBackedStore, validated::ValidatedStore}, + cbor, NetworkEventInternal, +}; /// Overarching network behaviour performing: /// - network topology discovery @@ -34,10 +37,10 @@ pub struct NetworkDef { #[debug(skip)] gossipsub: GossipBehaviour, - /// purpose: peer routing - /// purpose: storing pub key <-> peer id bijection + /// The DHT store. We use a `FileBackedStore` to occasionally save the DHT to + /// a file on disk and a `ValidatedStore` to validate the records stored. #[debug(skip)] - pub dht: libp2p::kad::Behaviour>, + pub dht: libp2p::kad::Behaviour>>, /// purpose: identifying the addresses from an outside POV #[debug(skip)] @@ -58,7 +61,7 @@ impl NetworkDef { #[must_use] pub fn new( gossipsub: GossipBehaviour, - dht: libp2p::kad::Behaviour>, + dht: libp2p::kad::Behaviour>>, identify: IdentifyBehaviour, direct_message: super::cbor::Behaviour, Vec>, autonat: autonat::Behaviour, diff --git a/crates/libp2p-networking/src/network/node.rs b/crates/libp2p-networking/src/network/node.rs index 05bacb310d..1ea25a36d0 100644 --- a/crates/libp2p-networking/src/network/node.rs +++ b/crates/libp2p-networking/src/network/node.rs @@ -59,7 +59,7 @@ pub use self::{ use super::{ behaviours::dht::{ bootstrap::{DHTBootstrapTask, InputEvent}, - store::ValidatedStore, + store::{file_backed::FileBackedStore, validated::ValidatedStore}, }, cbor::Cbor, gen_transport, BoxedTransport, ClientRequest, NetworkDef, NetworkError, NetworkEvent, @@ -253,9 +253,20 @@ impl NetworkNode { panic!("Replication factor not set"); } + // Extract the DHT file path from the config, defaulting to `libp2p_dht.json` + let dht_file_path = config + .dht_file_path + .clone() + .unwrap_or_else(|| "libp2p_dht.bin".into()); + + // Create the DHT behaviour let mut kadem = Behaviour::with_config( peer_id, - ValidatedStore::new(MemoryStore::new(peer_id)), + FileBackedStore::new( + ValidatedStore::new(MemoryStore::new(peer_id)), + dht_file_path, + 10, + ), kconfig, ); kadem.set_mode(Some(Mode::Server)); diff --git a/crates/libp2p-networking/src/network/node/config.rs b/crates/libp2p-networking/src/network/node/config.rs index d459ae0217..1f5422e321 100644 --- a/crates/libp2p-networking/src/network/node/config.rs +++ b/crates/libp2p-networking/src/network/node/config.rs @@ -51,6 +51,10 @@ pub struct NetworkNodeConfig { #[builder(default)] pub stake_table: Option, + /// The path to the file to save the DHT to + #[builder(default)] + pub dht_file_path: Option, + /// The signed authentication message sent to the remote peer /// If not supplied we will not send an authentication message during the handshake #[builder(default)] diff --git a/crates/macros/src/lib.rs b/crates/macros/src/lib.rs index d854593c85..c409cb6b58 100644 --- a/crates/macros/src/lib.rs +++ b/crates/macros/src/lib.rs @@ -11,25 +11,41 @@ use proc_macro2::TokenStream as TokenStream2; use quote::{format_ident, quote}; use syn::{ parse::{Parse, ParseStream, Result}, - parse_macro_input, Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, Token, + 
parse_macro_input, + punctuated::Punctuated, + Expr, ExprArray, ExprPath, ExprTuple, Ident, LitBool, PathArguments, Token, TypePath, }; +/// Bracketed types, e.g. [A, B, C] +/// These types can have generic parameters, whereas ExprArray items must be Expr. +#[derive(derive_builder::Builder, Debug, Clone)] +struct TypePathBracketedArray { + /// elems + pub elems: Punctuated, +} + /// description of a crosstest #[derive(derive_builder::Builder, Debug, Clone)] struct CrossTestData { /// implementations impls: ExprArray, + /// builder impl #[builder(default = "syn::parse_str(\"[SimpleBuilderImplementation]\").unwrap()")] builder_impls: ExprArray, + /// versions versions: ExprArray, + /// types - types: ExprArray, + types: TypePathBracketedArray, + /// name of the test test_name: Ident, + /// test description/spec metadata: Expr, + /// whether or not to ignore ignore: LitBool, } @@ -51,17 +67,23 @@ impl CrossTestDataBuilder { #[derive(derive_builder::Builder, Debug, Clone)] struct TestData { /// type - ty: ExprPath, + ty: TypePath, + /// impl imply: ExprPath, + /// builder implementation builder_impl: ExprPath, + /// impl version: ExprPath, + /// name of test test_name: Ident, + /// test description metadata: Expr, + /// whether or not to ignore the test ignore: LitBool, } @@ -86,6 +108,58 @@ impl ToLowerSnakeStr for ExprPath { } } +impl ToLowerSnakeStr for syn::GenericArgument { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn to_lower_snake_str(&self) -> String { + match self { + syn::GenericArgument::Lifetime(l) => l.ident.to_string().to_lowercase(), + syn::GenericArgument::Type(t) => match t { + syn::Type::Path(p) => p.to_lower_snake_str(), + _ => { + panic!("Unexpected type for GenericArgument::Type: {t:?}"); + } + }, + syn::GenericArgument::Const(c) => match c { + syn::Expr::Lit(l) => match &l.lit { + syn::Lit::Str(v) => format!("{}_", v.value().to_lowercase()), + syn::Lit::Int(v) => format!("{}_", v.base10_digits()), + _ => { + panic!("Unexpected type for GenericArgument::Const::Lit: {l:?}"); + } + }, + _ => { + panic!("Unexpected type for GenericArgument::Const: {c:?}"); + } + }, + _ => { + panic!("Unexpected type for GenericArgument: {self:?}"); + } + } + } +} + +impl ToLowerSnakeStr for TypePath { + fn to_lower_snake_str(&self) -> String { + self.path + .segments + .iter() + .fold(String::new(), |mut acc, s| { + acc.push_str(&s.ident.to_string().to_lowercase()); + if let PathArguments::AngleBracketed(a) = &s.arguments { + acc.push('_'); + for arg in &a.args { + acc.push_str(&arg.to_lower_snake_str()); + } + } + + acc.push('_'); + acc + }) + .to_lowercase() + } +} + impl ToLowerSnakeStr for ExprTuple { /// allow panic because this is a compiler error #[allow(clippy::panic)] @@ -149,6 +223,28 @@ mod keywords { syn::custom_keyword!(Versions); } +impl Parse for TypePathBracketedArray { + /// allow panic because this is a compiler error + #[allow(clippy::panic)] + fn parse(input: ParseStream<'_>) -> Result { + let content; + syn::bracketed!(content in input); + let mut elems = Punctuated::new(); + + while !content.is_empty() { + let first: TypePath = content.parse()?; + elems.push_value(first); + if content.is_empty() { + break; + } + let punct = content.parse()?; + elems.push_punct(punct); + } + + Ok(Self { elems }) + } +} + impl Parse for CrossTestData { /// allow panic because this is a compiler error #[allow(clippy::panic)] @@ -159,7 +255,7 @@ impl Parse for CrossTestData { if input.peek(keywords::Types) { let _ = input.parse::()?; input.parse::()?; - 
let types = input.parse::()?; + let types = input.parse::()?; //ExprArray>()?; description.types(types); } else if input.peek(keywords::Impls) { let _ = input.parse::()?; @@ -216,13 +312,8 @@ fn cross_tests_internal(test_spec: CrossTestData) -> TokenStream { }; p }); - // - let types = test_spec.types.elems.iter().map(|t| { - let Expr::Path(p) = t else { - panic!("Expected Path for Type! Got {t:?}"); - }; - p - }); + + let types = test_spec.types.elems.iter(); let versions = test_spec.versions.elems.iter().map(|t| { let Expr::Path(p) = t else { diff --git a/crates/task-impls/src/consensus/handlers.rs b/crates/task-impls/src/consensus/handlers.rs index 684c3b6e34..27fc0a7b43 100644 --- a/crates/task-impls/src/consensus/handlers.rs +++ b/crates/task-impls/src/consensus/handlers.rs @@ -133,7 +133,15 @@ pub async fn send_high_qc task_state .membership .has_stake(&task_state.public_key, task_state.cur_epoch), - debug!("We were not chosen for the consensus committee for view {view_number:?}") + debug!( + "We were not chosen for the consensus committee for view {:?}", + view_number + ) ); let vote = TimeoutVote::create_signed_vote( diff --git a/crates/task-impls/src/da.rs b/crates/task-impls/src/da.rs index 68df39136f..923b15e010 100644 --- a/crates/task-impls/src/da.rs +++ b/crates/task-impls/src/da.rs @@ -12,11 +12,11 @@ use async_trait::async_trait; use hotshot_task::task::TaskState; use hotshot_types::{ consensus::{Consensus, OuterConsensus}, - data::{DaProposal, PackedBundle}, + data::{DaProposal2, PackedBundle}, event::{Event, EventType}, message::{Proposal, UpgradeLock}, - simple_certificate::DaCertificate, - simple_vote::{DaData, DaVote}, + simple_certificate::DaCertificate2, + simple_vote::{DaData2, DaVote2}, traits::{ block_contents::vid_commitment, election::Membership, @@ -61,7 +61,7 @@ pub struct DaTaskState, V: Version pub network: Arc, /// A map of `DaVote` collector tasks. - pub vote_collectors: VoteCollectorsMap, DaCertificate, V>, + pub vote_collectors: VoteCollectorsMap, DaCertificate2, V>, /// This Nodes public key pub public_key: TYPES::SignatureKey, @@ -120,7 +120,6 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState { let cur_view = self.consensus.read().await.cur_view(); + let view_number = proposal.data.view_number(); + let epoch_number = proposal.data.epoch_number; + ensure!( - cur_view <= proposal.data.view_number() + 1, + cur_view <= view_number + 1, debug!( "Validated DA proposal for prior view but it's too old now Current view {:?}, DA Proposal view {:?}", cur_view, @@ -156,7 +158,7 @@ impl, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState, V: Versions> DaTaskState = DaProposal { + let data: DaProposal2 = DaProposal2 { encoded_transactions: Arc::clone(encoded_transactions), metadata: metadata.clone(), // Upon entering a new view we want to send a DA Proposal for the next view -> Is it always the case that this is cur_view + 1? 
view_number, + epoch_number: *epoch_number, }; let message = Proposal { diff --git a/crates/task-impls/src/events.rs b/crates/task-impls/src/events.rs index 5153028e99..c75ef7c752 100644 --- a/crates/task-impls/src/events.rs +++ b/crates/task-impls/src/events.rs @@ -11,18 +11,18 @@ use either::Either; use hotshot_task::task::TaskEvent; use hotshot_types::{ data::{ - DaProposal, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, + DaProposal2, Leaf2, PackedBundle, QuorumProposal2, UpgradeProposal, VidDisperse, VidDisperseShare, }, message::Proposal, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, + ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, + DaVote2, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -80,15 +80,15 @@ pub enum HotShotEvent { /// Send a timeout vote to the network; emitted by consensus task replicas TimeoutVoteSend(TimeoutVote), /// A DA proposal has been received from the network; handled by the DA task - DaProposalRecv(Proposal>, TYPES::SignatureKey), + DaProposalRecv(Proposal>, TYPES::SignatureKey), /// A DA proposal has been validated; handled by the DA task and VID task - DaProposalValidated(Proposal>, TYPES::SignatureKey), + DaProposalValidated(Proposal>, TYPES::SignatureKey), /// A DA vote has been received by the network; handled by the DA task - DaVoteRecv(DaVote), + DaVoteRecv(DaVote2), /// A Data Availability Certificate (DAC) has been received by the network; handled by the consensus task - DaCertificateRecv(DaCertificate), + DaCertificateRecv(DaCertificate2), /// A DAC is validated. - DaCertificateValidated(DaCertificate), + DaCertificateValidated(DaCertificate2), /// Send a quorum proposal to the network; emitted by the leader in the consensus task QuorumProposalSend(Proposal>, TYPES::SignatureKey), /// Send a quorum vote to the next leader; emitted by a replica in the consensus task after seeing a valid quorum proposal @@ -117,15 +117,15 @@ pub enum HotShotEvent { /// A quorum proposal was requested by a node for a view. 
QuorumProposalResponseRecv(Proposal>), /// Send a DA proposal to the DA committee; emitted by the DA leader (which is the same node as the leader of view v + 1) in the DA task - DaProposalSend(Proposal>, TYPES::SignatureKey), + DaProposalSend(Proposal>, TYPES::SignatureKey), /// Send a DA vote to the DA leader; emitted by DA committee members in the DA task after seeing a valid DA proposal - DaVoteSend(DaVote), + DaVoteSend(DaVote2), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only QcFormed(Either, TimeoutCertificate>), /// The next leader has collected enough votes to form a QC; emitted by the next leader in the consensus task; an internal event only Qc2Formed(Either, TimeoutCertificate>), /// The DA leader has collected enough votes to form a DAC; emitted by the DA leader in the DA task; sent to the entire network via the networking task - DacSend(DaCertificate, TYPES::SignatureKey), + DacSend(DaCertificate2, TYPES::SignatureKey), /// The current view has changed; emitted by the replica in the consensus task or replica in the view sync task; received by almost all other tasks ViewChange(TYPES::View, TYPES::Epoch), /// Timeout for the view sync protocol; emitted by a replica in the view sync task @@ -145,19 +145,19 @@ pub enum HotShotEvent { /// Send a `ViewSyncFinalizeVote` from the network; emitted by a replica in the view sync task ViewSyncFinalizeVoteSend(ViewSyncFinalizeVote), - /// Receive a `ViewSyncPreCommitCertificate2` from the network; received by a replica in the view sync task - ViewSyncPreCommitCertificate2Recv(ViewSyncPreCommitCertificate2), - /// Receive a `ViewSyncCommitCertificate2` from the network; received by a replica in the view sync task - ViewSyncCommitCertificate2Recv(ViewSyncCommitCertificate2), - /// Receive a `ViewSyncFinalizeCertificate2` from the network; received by a replica in the view sync task - ViewSyncFinalizeCertificate2Recv(ViewSyncFinalizeCertificate2), + /// Receive a `ViewSyncPreCommitCertificate` from the network; received by a replica in the view sync task + ViewSyncPreCommitCertificateRecv(ViewSyncPreCommitCertificate), + /// Receive a `ViewSyncCommitCertificate` from the network; received by a replica in the view sync task + ViewSyncCommitCertificateRecv(ViewSyncCommitCertificate), + /// Receive a `ViewSyncFinalizeCertificate` from the network; received by a replica in the view sync task + ViewSyncFinalizeCertificateRecv(ViewSyncFinalizeCertificate), - /// Send a `ViewSyncPreCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncPreCommitCertificate2Send(ViewSyncPreCommitCertificate2, TYPES::SignatureKey), - /// Send a `ViewSyncCommitCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncCommitCertificate2Send(ViewSyncCommitCertificate2, TYPES::SignatureKey), - /// Send a `ViewSyncFinalizeCertificate2` from the network; emitted by a relay in the view sync task - ViewSyncFinalizeCertificate2Send(ViewSyncFinalizeCertificate2, TYPES::SignatureKey), + /// Send a `ViewSyncPreCommitCertificate` from the network; emitted by a relay in the view sync task + ViewSyncPreCommitCertificateSend(ViewSyncPreCommitCertificate, TYPES::SignatureKey), + /// Send a `ViewSyncCommitCertificate` from the network; emitted by a relay in the view sync task + ViewSyncCommitCertificateSend(ViewSyncCommitCertificate, TYPES::SignatureKey), + /// Send a `ViewSyncFinalizeCertificate` from the network; emitted by a relay in the 
view sync task + ViewSyncFinalizeCertificateSend(ViewSyncFinalizeCertificate, TYPES::SignatureKey), /// Trigger the start of the view sync protocol; emitted by view sync task; internal trigger only ViewSyncTrigger(TYPES::View), @@ -242,7 +242,11 @@ pub enum HotShotEvent { HighQcRecv(QuorumCertificate2, TYPES::SignatureKey), /// Send our HighQc to the next leader, should go to the same leader as our vote - HighQcSend(QuorumCertificate2, TYPES::SignatureKey), + HighQcSend( + QuorumCertificate2, + TYPES::SignatureKey, + TYPES::SignatureKey, + ), } impl HotShotEvent { @@ -285,12 +289,12 @@ impl HotShotEvent { | HotShotEvent::ViewSyncPreCommitVoteSend(vote) => Some(vote.view_number()), HotShotEvent::ViewSyncFinalizeVoteRecv(vote) | HotShotEvent::ViewSyncFinalizeVoteSend(vote) => Some(vote.view_number()), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => Some(cert.view_number()), - HotShotEvent::ViewSyncCommitCertificate2Recv(cert) - | HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => Some(cert.view_number()), - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncPreCommitCertificateRecv(cert) + | HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncCommitCertificateRecv(cert) + | HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => Some(cert.view_number()), + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) + | HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => Some(cert.view_number()), HotShotEvent::SendPayloadCommitmentAndMetadata(_, _, _, view_number, _, _) => { Some(*view_number) } @@ -322,7 +326,7 @@ impl HotShotEvent { | HotShotEvent::VidRequestRecv(request, _) => Some(request.view), HotShotEvent::VidResponseSend(_, _, proposal) | HotShotEvent::VidResponseRecv(_, proposal) => Some(proposal.data.view_number), - HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, _) => { + HotShotEvent::HighQcRecv(qc, _) | HotShotEvent::HighQcSend(qc, ..) 
=> { Some(qc.view_number()) } } @@ -447,45 +451,45 @@ impl Display for HotShotEvent { "ViewSyncFinalizeVoteSend(view_number={:?})", vote.view_number() ), - HotShotEvent::ViewSyncPreCommitCertificate2Recv(cert) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(cert) => { write!( f, - "ViewSyncPreCommitCertificate2Recv(view_number={:?})", + "ViewSyncPreCommitCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncCommitCertificate2Recv(cert) => { + HotShotEvent::ViewSyncCommitCertificateRecv(cert) => { write!( f, - "ViewSyncCommitCertificate2Recv(view_number={:?})", + "ViewSyncCommitCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { write!( f, - "ViewSyncFinalizeCertificate2Recv(view_number={:?})", + "ViewSyncFinalizeCertificateRecv(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncPreCommitCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncPreCommitCertificateSend(cert, _) => { write!( f, - "ViewSyncPreCommitCertificate2Send(view_number={:?})", + "ViewSyncPreCommitCertificateSend(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncCommitCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncCommitCertificateSend(cert, _) => { write!( f, - "ViewSyncCommitCertificate2Send(view_number={:?})", + "ViewSyncCommitCertificateSend(view_number={:?})", cert.view_number() ) } - HotShotEvent::ViewSyncFinalizeCertificate2Send(cert, _) => { + HotShotEvent::ViewSyncFinalizeCertificateSend(cert, _) => { write!( f, - "ViewSyncFinalizeCertificate2Send(view_number={:?})", + "ViewSyncFinalizeCertificateSend(view_number={:?})", cert.view_number() ) } @@ -590,7 +594,7 @@ impl Display for HotShotEvent { HotShotEvent::HighQcRecv(qc, _) => { write!(f, "HighQcRecv(view_number={:?}", qc.view_number()) } - HotShotEvent::HighQcSend(qc, _) => { + HotShotEvent::HighQcSend(qc, ..) 
=> { write!(f, "HighQcSend(view_number={:?}", qc.view_number()) } } diff --git a/crates/task-impls/src/helpers.rs b/crates/task-impls/src/helpers.rs index e04a808ca3..242bf3db0d 100644 --- a/crates/task-impls/src/helpers.rs +++ b/crates/task-impls/src/helpers.rs @@ -127,7 +127,7 @@ pub(crate) async fn fetch_proposal( if !justify_qc .is_valid_cert( quorum_membership.stake_table(cur_epoch), - quorum_membership.success_threshold(), + quorum_membership.success_threshold(cur_epoch), upgrade_lock, ) .await @@ -686,7 +686,9 @@ pub(crate) async fn validate_proposal_view_and_certs< validation_info .quorum_membership .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + validation_info + .quorum_membership + .success_threshold(validation_info.cur_epoch), &validation_info.upgrade_lock ) .await, @@ -709,7 +711,9 @@ pub(crate) async fn validate_proposal_view_and_certs< validation_info .quorum_membership .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + validation_info + .quorum_membership + .success_threshold(validation_info.cur_epoch), &validation_info.upgrade_lock ) .await, diff --git a/crates/task-impls/src/network.rs b/crates/task-impls/src/network.rs index 7cc8dceca0..b885e2f7e5 100644 --- a/crates/task-impls/src/network.rs +++ b/crates/task-impls/src/network.rs @@ -36,6 +36,7 @@ use hotshot_types::{ use tokio::{spawn, task::JoinHandle}; use tracing::instrument; use utils::anytrace::*; +use vbs::version::StaticVersionType; use crate::{ events::{HotShotEvent, HotShotTaskCompleted}, @@ -88,22 +89,20 @@ impl NetworkMessageTaskState { } GeneralConsensusMessage::ViewSyncPreCommitCertificate( view_sync_message, - ) => HotShotEvent::ViewSyncPreCommitCertificate2Recv(view_sync_message), + ) => HotShotEvent::ViewSyncPreCommitCertificateRecv(view_sync_message), GeneralConsensusMessage::ViewSyncCommitVote(view_sync_message) => { HotShotEvent::ViewSyncCommitVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncCommitCertificate(view_sync_message) => { - HotShotEvent::ViewSyncCommitCertificate2Recv(view_sync_message) + HotShotEvent::ViewSyncCommitCertificateRecv(view_sync_message) } - GeneralConsensusMessage::ViewSyncFinalizeVote(view_sync_message) => { HotShotEvent::ViewSyncFinalizeVoteRecv(view_sync_message) } GeneralConsensusMessage::ViewSyncFinalizeCertificate(view_sync_message) => { - HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_message) + HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_message) } - GeneralConsensusMessage::TimeoutVote(message) => { HotShotEvent::TimeoutVoteRecv(message) } @@ -114,21 +113,35 @@ impl NetworkMessageTaskState { tracing::error!("Received upgrade vote!"); HotShotEvent::UpgradeVoteRecv(message) } - GeneralConsensusMessage::HighQc(qc) => { - HotShotEvent::HighQcRecv(qc.to_qc2(), sender) + GeneralConsensusMessage::HighQc(qc) => HotShotEvent::HighQcRecv(qc, sender), + GeneralConsensusMessage::Proposal2(proposal) => { + HotShotEvent::QuorumProposalRecv(proposal, sender) + } + GeneralConsensusMessage::Vote2(vote) => HotShotEvent::QuorumVoteRecv(vote), + GeneralConsensusMessage::Proposal2Response(proposal) => { + HotShotEvent::QuorumProposalResponseRecv(proposal) } }, SequencingMessage::Da(da_message) => match da_message { DaConsensusMessage::DaProposal(proposal) => { - HotShotEvent::DaProposalRecv(proposal, sender) + HotShotEvent::DaProposalRecv(convert_proposal(proposal), sender) + } + DaConsensusMessage::DaVote(vote) => { + 
HotShotEvent::DaVoteRecv(vote.clone().to_vote2()) } - DaConsensusMessage::DaVote(vote) => HotShotEvent::DaVoteRecv(vote.clone()), DaConsensusMessage::DaCertificate(cert) => { - HotShotEvent::DaCertificateRecv(cert) + HotShotEvent::DaCertificateRecv(cert.to_dac2()) } DaConsensusMessage::VidDisperseMsg(proposal) => { HotShotEvent::VidShareRecv(sender, proposal) } + DaConsensusMessage::DaProposal2(proposal) => { + HotShotEvent::DaProposalRecv(proposal, sender) + } + DaConsensusMessage::DaVote2(vote) => HotShotEvent::DaVoteRecv(vote.clone()), + DaConsensusMessage::DaCertificate2(cert) => { + HotShotEvent::DaCertificateRecv(cert) + } }, }; broadcast_event(Arc::new(event), &self.internal_event_stream).await; @@ -376,13 +389,23 @@ impl< match event.as_ref().clone() { HotShotEvent::QuorumProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::Propose); - Some(( - sender, + + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Proposal2(proposal), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Proposal(convert_proposal(proposal)), - )), - TransmitType::Broadcast, - )) + )) + }; + + Some((sender, message, TransmitType::Broadcast)) } // ED Each network task is subscribed to all these message types. Need filters per network task @@ -401,23 +424,41 @@ impl< } }; - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote2(vote.clone()), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone().to_vote()), - )), - TransmitType::Direct(leader), - )) + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::ExtendedQuorumVoteSend(vote) => { *maybe_action = Some(HotShotAction::Vote); - Some(( - vote.signing_key(), + let message = if self + .upgrade_lock + .version_infallible(vote.view_number()) + .await + >= V::Epochs::VERSION + { + MessageKind::::from_consensus_message(SequencingMessage::General( + GeneralConsensusMessage::Vote2(vote.clone()), + )) + } else { MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::Vote(vote.clone().to_vote()), - )), - TransmitType::Broadcast, - )) + )) + }; + + Some((vote.signing_key(), message, TransmitType::Broadcast)) } HotShotEvent::QuorumProposalRequestSend(req, signature) => Some(( req.key.clone(), @@ -439,13 +480,23 @@ impl< } HotShotEvent::DaProposalSend(proposal, sender) => { *maybe_action = Some(HotShotAction::DaPropose); - Some(( - sender, + + let message = if self + .upgrade_lock + .version_infallible(proposal.data.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaProposal(proposal), - )), - TransmitType::DaCommitteeBroadcast, - )) + DaConsensusMessage::DaProposal2(proposal), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaProposal(convert_proposal(proposal)), + )) + }; + + Some((sender, message, TransmitType::DaCommitteeBroadcast)) } HotShotEvent::DaVoteSend(vote) => { *maybe_action = Some(HotShotAction::DaVote); @@ -462,23 +513,38 @@ impl< } }; - Some(( - 
vote.signing_key(), + let message = if self.upgrade_lock.version_infallible(view_number).await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaVote(vote.clone()), - )), - TransmitType::Direct(leader), - )) + DaConsensusMessage::DaVote2(vote.clone()), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaVote(vote.clone().to_vote()), + )) + }; + + Some((vote.signing_key(), message, TransmitType::Direct(leader))) } HotShotEvent::DacSend(certificate, sender) => { *maybe_action = Some(HotShotAction::DaCert); - Some(( - sender, + let message = if self + .upgrade_lock + .version_infallible(certificate.view_number()) + .await + >= V::Epochs::VERSION + { MessageKind::::from_consensus_message(SequencingMessage::Da( - DaConsensusMessage::DaCertificate(certificate), - )), - TransmitType::Broadcast, - )) + DaConsensusMessage::DaCertificate2(certificate), + )) + } else { + MessageKind::::from_consensus_message(SequencingMessage::Da( + DaConsensusMessage::DaCertificate(certificate.to_dac()), + )) + }; + + Some((sender, message, TransmitType::Broadcast)) } HotShotEvent::ViewSyncPreCommitVoteSend(vote) => { let view_number = vote.view_number() + vote.date().relay; @@ -548,21 +614,21 @@ impl< TransmitType::Direct(leader), )) } - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncPreCommitCertificate(certificate), )), TransmitType::Broadcast, )), - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncCommitCertificate(certificate), )), TransmitType::Broadcast, )), - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, sender) => Some(( + HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, sender) => Some(( sender, MessageKind::::from_consensus_message(SequencingMessage::General( GeneralConsensusMessage::ViewSyncFinalizeCertificate(certificate), @@ -650,6 +716,13 @@ impl< TransmitType::Direct(to), )) } + HotShotEvent::HighQcSend(quorum_cert, leader, sender) => Some(( + sender, + MessageKind::Consensus(SequencingMessage::General( + GeneralConsensusMessage::HighQc(quorum_cert), + )), + TransmitType::Direct(leader), + )), _ => None, } } diff --git a/crates/task-impls/src/quorum_proposal/handlers.rs b/crates/task-impls/src/quorum_proposal/handlers.rs index 55f283bcd8..e8b124b1e2 100644 --- a/crates/task-impls/src/quorum_proposal/handlers.rs +++ b/crates/task-impls/src/quorum_proposal/handlers.rs @@ -20,7 +20,8 @@ use committable::Committable; use hotshot_task::dependency_task::HandleDepOutput; use hotshot_types::{ consensus::{CommitmentAndMetadata, OuterConsensus}, - data::{Leaf2, QuorumProposal, VidDisperse, ViewChangeEvidence}, + data::{Leaf2, QuorumProposal2, VidDisperse, ViewChangeEvidence}, + drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::Proposal, simple_certificate::{QuorumCertificate2, UpgradeCertificate}, traits::{ @@ -50,7 +51,7 @@ pub(crate) enum ProposalDependency { /// For the `Qc2Formed` event. Qc, - /// For the `ViewSyncFinalizeCertificate2Recv` event. + /// For the `ViewSyncFinalizeCertificateRecv` event. 
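// A toy, self-contained sketch of the version gate used throughout the network
// task above: messages are encoded with the epoch-aware `…2` variants only once
// the version active for their view has reached the epochs version. All names
// below are illustrative stand-ins, not the HotShot API.
#[derive(Debug, PartialEq)]
enum Wire {
    Legacy(u64),          // stands in for a pre-epoch message (view only)
    EpochAware(u64, u64), // stands in for a `…2` message (view, epoch)
}

const EPOCHS_VERSION: u32 = 4; // placeholder for `V::Epochs::VERSION`

fn version_for_view(view: u64) -> u32 {
    // placeholder for `upgrade_lock.version_infallible(view).await`
    if view >= 100 { EPOCHS_VERSION } else { EPOCHS_VERSION - 1 }
}

fn encode(view: u64, epoch: u64) -> Wire {
    if version_for_view(view) >= EPOCHS_VERSION {
        Wire::EpochAware(view, epoch)
    } else {
        Wire::Legacy(view)
    }
}

fn main() {
    assert_eq!(encode(50, 1), Wire::Legacy(50));
    assert_eq!(encode(150, 2), Wire::EpochAware(150, 2));
}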
ViewSyncCert, /// For the `Qc2Formed` event timeout branch. @@ -128,7 +129,8 @@ impl ProposalDependencyHandle { // TODO take epoch from `qc` // https://github.com/EspressoSystems/HotShot/issues/3917 self.quorum_membership.stake_table(TYPES::Epoch::new(0)), - self.quorum_membership.success_threshold(), + self.quorum_membership + .success_threshold(TYPES::Epoch::new(0)), &self.upgrade_lock, ) .await @@ -295,14 +297,15 @@ impl ProposalDependencyHandle { .context(warn!("Failed to construct marketplace block header"))? }; - let proposal = QuorumProposal { + let proposal = QuorumProposal2 { block_header, view_number: self.view_number, - justify_qc: parent_qc.to_qc(), + justify_qc: parent_qc, upgrade_certificate, - proposal_certificate, - } - .into(); + view_change_evidence: proposal_certificate, + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, + }; let proposed_leaf = Leaf2::from_quorum_proposal(&proposal); ensure!( @@ -375,7 +378,7 @@ impl HandleDepOutput for ProposalDependencyHandle< parent_qc = Some(qc.clone()); } }, - HotShotEvent::ViewSyncFinalizeCertificate2Recv(cert) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(cert) => { view_sync_finalize_cert = Some(cert.clone()); } HotShotEvent::VidDisperseSend(share, _) => { diff --git a/crates/task-impls/src/quorum_proposal/mod.rs b/crates/task-impls/src/quorum_proposal/mod.rs index d226677eb1..49875c6480 100644 --- a/crates/task-impls/src/quorum_proposal/mod.rs +++ b/crates/task-impls/src/quorum_proposal/mod.rs @@ -117,8 +117,7 @@ impl, V: Versions> } } ProposalDependency::ViewSyncCert => { - if let HotShotEvent::ViewSyncFinalizeCertificate2Recv(view_sync_cert) = - event + if let HotShotEvent::ViewSyncFinalizeCertificateRecv(view_sync_cert) = event { view_sync_cert.view_number() } else { @@ -225,7 +224,7 @@ impl, V: Versions> qc_dependency.mark_as_completed(event); } }, - HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(_) => { view_sync_dependency.mark_as_completed(event); } HotShotEvent::VidDisperseSend(_, _) => { @@ -437,14 +436,14 @@ impl, V: Versions> Arc::clone(&event), )?; } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( certificate .is_valid_cert( self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(), + self.quorum_membership.success_threshold(epoch_number), &self.upgrade_lock ) .await, @@ -502,17 +501,17 @@ impl, V: Versions> HotShotEvent::ViewChange(view, _) | HotShotEvent::Timeout(view) => { self.cancel_tasks(*view); } - HotShotEvent::HighQcSend(qc, _sender) => { + HotShotEvent::HighQcSend(qc, ..) 
=> { ensure!(qc.view_number() > self.highest_qc.view_number()); let epoch_number = self.consensus.read().await.cur_epoch(); ensure!( qc.is_valid_cert( self.quorum_membership.stake_table(epoch_number), - self.quorum_membership.success_threshold(), + self.quorum_membership.success_threshold(epoch_number), &self.upgrade_lock ) .await, - warn!("Qurom certificate {:?} was invalid", qc.data()) + warn!("Quorum certificate {:?} was invalid", qc.data()) ); self.highest_qc = qc.clone(); } diff --git a/crates/task-impls/src/quorum_proposal_recv/handlers.rs b/crates/task-impls/src/quorum_proposal_recv/handlers.rs index 73f6addef0..528fc04562 100644 --- a/crates/task-impls/src/quorum_proposal_recv/handlers.rs +++ b/crates/task-impls/src/quorum_proposal_recv/handlers.rs @@ -156,8 +156,10 @@ pub(crate) async fn handle_quorum_proposal_recv< .is_valid_cert( validation_info .quorum_membership - .stake_table(validation_info.cur_epoch), - validation_info.quorum_membership.success_threshold(), + .stake_table(justify_qc.data.epoch), + validation_info + .quorum_membership + .success_threshold(justify_qc.data.epoch), &validation_info.upgrade_lock, ) .await diff --git a/crates/task-impls/src/quorum_vote/drb_computations.rs b/crates/task-impls/src/quorum_vote/drb_computations.rs new file mode 100644 index 0000000000..fc5483f8dd --- /dev/null +++ b/crates/task-impls/src/quorum_vote/drb_computations.rs @@ -0,0 +1,126 @@ +use std::collections::{btree_map, BTreeMap}; + +use hotshot_types::{ + drb::{compute_drb_result, DrbResult, DrbSeedInput}, + traits::node_implementation::{ConsensusTime, NodeType}, +}; +use tokio::{spawn, task::JoinHandle}; + +/// Number of previous results and seeds to keep +pub const KEEP_PREVIOUS_RESULT_COUNT: u64 = 8; + +/// Helper struct to track state of DRB computations +pub struct DrbComputations { + /// Stored results from computations + results: BTreeMap, + + /// Currently live computation + task: Option<(TYPES::Epoch, JoinHandle)>, + + /// Stored inputs to computations + seeds: BTreeMap, +} + +impl DrbComputations { + #[must_use] + /// Create a new DrbComputations + pub fn new() -> Self { + Self { + results: BTreeMap::new(), + task: None, + seeds: BTreeMap::new(), + } + } + + /// If a task is currently live AND has finished, join it and save the result. + /// If the epoch for the calculation was the same as the provided epoch, return true + /// If a task is currently live and NOT finished, abort it UNLESS the task epoch is the same as + /// cur_epoch, in which case keep letting it run and return true. + /// Return false if a task should be spawned for the given epoch. + async fn join_or_abort_old_task(&mut self, epoch: TYPES::Epoch) -> bool { + if let Some((task_epoch, join_handle)) = &mut self.task { + if join_handle.is_finished() { + match join_handle.await { + Ok(result) => { + self.results.insert(*task_epoch, result); + let result = *task_epoch == epoch; + self.task = None; + result + } + Err(e) => { + tracing::error!("error joining DRB computation task: {e:?}"); + false + } + } + } else if *task_epoch == epoch { + true + } else { + join_handle.abort(); + self.task = None; + false + } + } else { + false + } + } + + /// Stores a seed for a particular epoch for later use by start_task_if_not_running, called from handle_quorum_proposal_validated_drb_calculation_start + pub fn store_seed(&mut self, epoch: TYPES::Epoch, drb_seed_input: DrbSeedInput) { + self.seeds.insert(epoch, drb_seed_input); + } + + /// Starts a new task. Cancels a current task if that task is not for the provided epoch. 
Allows a task to continue + /// running if it was already started for the given epoch. Avoids running the task if we already have a result for + /// the epoch. + pub async fn start_task_if_not_running(&mut self, epoch: TYPES::Epoch) { + // If join_or_abort_task returns true, then we either just completed a task for this epoch, or we currently + // have a running task for the epoch. + if self.join_or_abort_old_task(epoch).await { + return; + } + + // In case we somehow ended up processing this epoch already, don't start it again + if self.results.contains_key(&epoch) { + return; + } + + if let btree_map::Entry::Occupied(entry) = self.seeds.entry(epoch) { + let drb_seed_input = *entry.get(); + let new_drb_task = spawn(async move { compute_drb_result::(drb_seed_input) }); + self.task = Some((epoch, new_drb_task)); + entry.remove(); + } + } + + /// Retrieves the result for a given epoch + pub fn get_result(&self, epoch: TYPES::Epoch) -> Option { + self.results.get(&epoch).copied() + } + + /// Retrieves the seed for a given epoch + pub fn get_seed(&self, epoch: TYPES::Epoch) -> Option { + self.seeds.get(&epoch).copied() + } + + /// Garbage collects internal data structures + pub fn garbage_collect(&mut self, epoch: TYPES::Epoch) { + if epoch.u64() < KEEP_PREVIOUS_RESULT_COUNT { + return; + } + + let retain_epoch = epoch - KEEP_PREVIOUS_RESULT_COUNT; + // N.B. x.split_off(y) returns the part of the map where key >= y + + // Remove result entries older than EPOCH + self.results = self.results.split_off(&retain_epoch); + + // Remove result entries older than EPOCH+1 + self.seeds = self.seeds.split_off(&(retain_epoch + 1)); + } +} + +impl Default for DrbComputations { + fn default() -> Self { + Self::new() + } +} diff --git a/crates/task-impls/src/quorum_vote/handlers.rs b/crates/task-impls/src/quorum_vote/handlers.rs index d585259385..c434352ba1 100644 --- a/crates/task-impls/src/quorum_vote/handlers.rs +++ b/crates/task-impls/src/quorum_vote/handlers.rs @@ -13,16 +13,18 @@ use committable::Committable; use hotshot_types::{ consensus::OuterConsensus, data::{Leaf2, QuorumProposal2, VidDisperseShare}, - event::{Event, EventType}, + event::{Event, EventType, LeafInfo}, message::{Proposal, UpgradeLock}, simple_vote::{QuorumData2, QuorumVote2}, traits::{ + block_contents::BlockHeader, election::Membership, node_implementation::{ConsensusTime, NodeImplementation, NodeType}, signature_key::SignatureKey, storage::Storage, ValidatedState, }, + utils::epoch_from_block_number, vote::HasViewNumber, }; use tracing::instrument; @@ -39,6 +41,96 @@ use crate::{ quorum_vote::Versions, }; +/// Handles starting the DRB calculation. Uses the seed previously stored in +/// handle_quorum_proposal_validated_drb_calculation_seed +async fn handle_quorum_proposal_validated_drb_calculation_start< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + proposal: &QuorumProposal2, + task_state: &mut QuorumVoteTaskState, +) { + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + proposal.block_header.block_number(), + task_state.epoch_height, + )); + + // Start the new task if we're in the committee for this epoch + if task_state + .membership + .has_stake(&task_state.public_key, current_epoch_number) + { + task_state + .drb_computations + .start_task_if_not_running(current_epoch_number + 1) + .await; + } +} + +/// Handles storing the seed for an upcoming DRB calculation. 
+/// +/// We store the DRB computation seed 2 epochs in advance, if the decided block is the last but +/// third block in the current epoch and we are in the quorum committee of the next epoch. +/// +/// Special cases: +/// * Epoch 0: No DRB computation since we'll transition to epoch 1 immediately. +/// * Epoch 1 and 2: Use `[0u8; 32]` as the DRB result since when we first start the +/// computation in epoch 1, the result is for epoch 3. +/// +/// We don't need to handle the special cases explicitly here, because the first proposal +/// with which we'll start the DRB computation is for epoch 3. +fn handle_quorum_proposal_validated_drb_calculation_seed< + TYPES: NodeType, + I: NodeImplementation, + V: Versions, +>( + proposal: &QuorumProposal2, + task_state: &mut QuorumVoteTaskState, + leaf_views: &[LeafInfo], +) -> Result<()> { + // This is never none if we've reached a new decide, so this is safe to unwrap. + let decided_block_number = leaf_views + .last() + .unwrap() + .leaf + .block_header() + .block_number(); + + // Skip if this is not the expected block. + if task_state.epoch_height != 0 && (decided_block_number + 3) % task_state.epoch_height == 0 { + // Cancel old DRB computation tasks. + let current_epoch_number = TYPES::Epoch::new(epoch_from_block_number( + decided_block_number, + task_state.epoch_height, + )); + + task_state + .drb_computations + .garbage_collect(current_epoch_number); + + // Skip if we are not in the committee of the next epoch. + if task_state + .membership + .has_stake(&task_state.public_key, current_epoch_number + 1) + { + let new_epoch_number = current_epoch_number + 2; + let Ok(drb_seed_input_vec) = bincode::serialize(&proposal.justify_qc.signatures) else { + bail!("Failed to serialize the QC signature."); + }; + let Ok(drb_seed_input) = drb_seed_input_vec.try_into() else { + bail!("Failed to convert the serialized QC signature into a DRB seed input."); + }; + + // Store the drb seed input for the next calculation + task_state + .drb_computations + .store_seed(new_epoch_number, drb_seed_input); + } + } + Ok(()) +} + /// Handles the `QuorumProposalValidated` event. #[instrument(skip_all, fields(id = task_state.id, view = *proposal.view_number))] pub(crate) async fn handle_quorum_proposal_validated< @@ -54,6 +146,10 @@ pub(crate) async fn handle_quorum_proposal_validated< .version(proposal.view_number()) .await?; + if version >= V::Epochs::VERSION { + handle_quorum_proposal_validated_drb_calculation_start(proposal, task_state).await; + } + let LeafChainTraversalOutcome { new_locked_view_number, new_decided_view_number, @@ -135,13 +231,13 @@ pub(crate) async fn handle_quorum_proposal_validated< // We don't need to hold this while we broadcast drop(consensus_writer); - // First, send an update to everyone saying that we've reached a decide + // Send an update to everyone saying that we've reached a decide broadcast_event( Event { view_number: decided_view_number, event: EventType::Decide { - leaf_chain: Arc::new(leaf_views), - // This is never *not* none if we've reached a new decide, so this is safe to unwrap. + leaf_chain: Arc::new(leaf_views.clone()), + // This is never none if we've reached a new decide, so this is safe to unwrap. 
qc: Arc::new(new_decide_qc.unwrap()), block_size: included_txns.map(|txns| txns.len().try_into().unwrap()), }, @@ -150,6 +246,14 @@ pub(crate) async fn handle_quorum_proposal_validated< ) .await; tracing::debug!("Successfully sent decide event"); + + if version >= V::Epochs::VERSION { + handle_quorum_proposal_validated_drb_calculation_seed( + proposal, + task_state, + &leaf_views, + )?; + } } Ok(()) @@ -295,12 +399,17 @@ pub(crate) async fn submit_vote, V private_key: ::PrivateKey, upgrade_lock: UpgradeLock, view_number: TYPES::View, - epoch_number: TYPES::Epoch, + epoch_height: u64, storage: Arc>, leaf: Leaf2, vid_share: Proposal>, extended_vote: bool, ) -> Result<()> { + let epoch_number = TYPES::Epoch::new(epoch_from_block_number( + leaf.block_header().block_number(), + epoch_height, + )); + ensure!( quorum_membership.has_stake(&public_key, epoch_number), info!( @@ -313,6 +422,7 @@ pub(crate) async fn submit_vote, V let vote = QuorumVote2::::create_signed_vote( QuorumData2 { leaf_commit: leaf.commit(), + epoch: epoch_number, }, view_number, &public_key, diff --git a/crates/task-impls/src/quorum_vote/mod.rs b/crates/task-impls/src/quorum_vote/mod.rs index d1079197f8..5f2b1df891 100644 --- a/crates/task-impls/src/quorum_vote/mod.rs +++ b/crates/task-impls/src/quorum_vote/mod.rs @@ -10,13 +10,14 @@ use async_broadcast::{InactiveReceiver, Receiver, Sender}; use async_lock::RwLock; use async_trait::async_trait; use committable::Committable; +use drb_computations::DrbComputations; use hotshot_task::{ dependency::{AndDependency, EventDependency}, dependency_task::{DependencyTask, HandleDepOutput}, task::TaskState, }; use hotshot_types::{ - consensus::OuterConsensus, + consensus::{ConsensusMetricsValue, OuterConsensus}, data::{Leaf2, QuorumProposal2}, event::Event, message::{Proposal, UpgradeLock}, @@ -43,6 +44,9 @@ use crate::{ quorum_vote::handlers::{handle_quorum_proposal_validated, submit_vote, update_shared_state}, }; +/// Helper for DRB Computations +pub mod drb_computations; + /// Event handlers for `QuorumProposalValidated`. mod handlers; @@ -79,6 +83,8 @@ pub struct VoteDependencyHandle, V pub receiver: InactiveReceiver>>, /// Lock for a decided upgrade pub upgrade_lock: UpgradeLock, + /// The consensus metrics + pub consensus_metrics: Arc, /// The node's id pub id: u64, /// Number of blocks in an epoch, zero means there are no epochs @@ -231,7 +237,7 @@ impl + 'static, V: Versions> Handl self.private_key.clone(), self.upgrade_lock.clone(), self.view_number, - current_epoch, + self.epoch_height, Arc::clone(&self.storage), leaf, vid_share, @@ -272,12 +278,19 @@ pub struct QuorumVoteTaskState, V: /// Membership for Quorum certs/votes and DA committee certs/votes. pub membership: Arc, + /// Table for the in-progress DRB computation tasks. + //pub drb_computations: BTreeMap>, + pub drb_computations: DrbComputations, + /// Output events to application pub output_event_stream: async_broadcast::Sender>, /// The node's id pub id: u64, + /// The consensus metrics + pub consensus_metrics: Arc, + /// Reference to the storage. 
pub storage: Arc>, @@ -382,6 +395,7 @@ impl, V: Versions> QuorumVoteTaskS upgrade_lock: self.upgrade_lock.clone(), id: self.id, epoch_height: self.epoch_height, + consensus_metrics: Arc::clone(&self.consensus_metrics), }, ); self.vote_dependencies @@ -406,6 +420,15 @@ impl, V: Versions> QuorumVoteTaskS } } + // Update the metric for the last voted view + if let Ok(last_voted_view_usize) = usize::try_from(*new_view) { + self.consensus_metrics + .last_voted_view + .set(last_voted_view_usize); + } else { + tracing::warn!("Failed to convert last voted view to a usize: {}", new_view); + } + self.latest_voted_view = new_view; return true; @@ -478,7 +501,7 @@ impl, V: Versions> QuorumVoteTaskS ensure!( cert.is_valid_cert( self.membership.da_stake_table(cur_epoch), - self.membership.da_success_threshold(), + self.membership.da_success_threshold(cur_epoch), &self.upgrade_lock ) .await, @@ -690,7 +713,7 @@ impl, V: Versions> QuorumVoteTaskS self.private_key.clone(), self.upgrade_lock.clone(), proposal.data.view_number(), - current_epoch, + self.epoch_height, Arc::clone(&self.storage), proposed_leaf, updated_vid, diff --git a/crates/task-impls/src/transactions.rs b/crates/task-impls/src/transactions.rs index f6da7ffec0..ffe9290d73 100644 --- a/crates/task-impls/src/transactions.rs +++ b/crates/task-impls/src/transactions.rs @@ -127,6 +127,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -137,10 +138,10 @@ impl, V: Versions> TransactionTask }; if version < V::Marketplace::VERSION { - self.handle_view_change_legacy(event_stream, block_view) + self.handle_view_change_legacy(event_stream, block_view, block_epoch) .await } else { - self.handle_view_change_marketplace(event_stream, block_view) + self.handle_view_change_marketplace(event_stream, block_view, block_epoch) .await } } @@ -151,6 +152,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let version = match self.upgrade_lock.version(block_view).await { Ok(v) => v, @@ -188,6 +190,7 @@ impl, V: Versions> TransactionTask block_payload.encode(), metadata, block_view, + block_epoch, vec1::vec1![fee], precompute_data, None, @@ -231,6 +234,7 @@ impl, V: Versions> TransactionTask vec![].into(), metadata, block_view, + block_epoch, vec1::vec1![null_fee], Some(precompute_data), None, @@ -251,6 +255,7 @@ impl, V: Versions> TransactionTask async fn produce_block_marketplace( &mut self, block_view: TYPES::View, + block_epoch: TYPES::Epoch, task_start_time: Instant, ) -> Result> { ensure!( @@ -343,6 +348,7 @@ impl, V: Versions> TransactionTask block_payload.encode(), metadata, block_view, + block_epoch, sequencing_fees, None, Some(auction_result), @@ -353,6 +359,7 @@ impl, V: Versions> TransactionTask pub fn null_block( &self, block_view: TYPES::View, + block_epoch: TYPES::Epoch, version: Version, ) -> Option> { let membership_total_nodes = self.membership.total_nodes(self.cur_epoch); @@ -374,6 +381,7 @@ impl, V: Versions> TransactionTask vec![].into(), metadata, block_view, + block_epoch, vec1::vec1![null_fee], Some(precompute_data), Some(TYPES::AuctionResult::default()), @@ -386,6 +394,7 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { let task_start_time = Instant::now(); @@ -398,7 +407,7 @@ impl, V: 
Versions> TransactionTask }; let packed_bundle = match self - .produce_block_marketplace(block_view, task_start_time) + .produce_block_marketplace(block_view, block_epoch, task_start_time) .await { Ok(b) => b, @@ -409,7 +418,7 @@ impl, V: Versions> TransactionTask e ); - let null_block = self.null_block(block_view, version)?; + let null_block = self.null_block(block_view, block_epoch, version)?; // Increment the metric for number of empty blocks proposed self.consensus @@ -438,12 +447,13 @@ impl, V: Versions> TransactionTask &mut self, event_stream: &Sender>>, block_view: TYPES::View, + block_epoch: TYPES::Epoch, ) -> Option { if self.consensus.read().await.is_high_qc_forming_eqc() { tracing::info!("Reached end of epoch. Not getting a new block until we form an eQC."); None } else { - self.handle_view_change_marketplace(event_stream, block_view) + self.handle_view_change_marketplace(event_stream, block_view, block_epoch) .await } } @@ -481,7 +491,7 @@ impl, V: Versions> TransactionTask ); self.cur_view = view; if self.membership.leader(view, self.cur_epoch)? == self.public_key { - self.handle_view_change(&event_stream, view).await; + self.handle_view_change(&event_stream, view, *epoch).await; return Ok(()); } } diff --git a/crates/task-impls/src/view_sync.rs b/crates/task-impls/src/view_sync.rs index efb87f9cd5..3574fe0ee6 100644 --- a/crates/task-impls/src/view_sync.rs +++ b/crates/task-impls/src/view_sync.rs @@ -18,7 +18,7 @@ use hotshot_task::task::TaskState; use hotshot_types::{ message::{GeneralConsensusMessage, UpgradeLock}, simple_certificate::{ - ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, }, simple_vote::{ ViewSyncCommitData, ViewSyncCommitVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, @@ -92,17 +92,16 @@ pub struct ViewSyncTaskState { /// Map of pre-commit vote accumulates for the relay pub pre_commit_relay_map: RwLock< - RelayMap, ViewSyncPreCommitCertificate2, V>, + RelayMap, ViewSyncPreCommitCertificate, V>, >, /// Map of commit vote accumulates for the relay pub commit_relay_map: - RwLock, ViewSyncCommitCertificate2, V>>, + RwLock, ViewSyncCommitCertificate, V>>, /// Map of finalize vote accumulates for the relay - pub finalize_relay_map: RwLock< - RelayMap, ViewSyncFinalizeCertificate2, V>, - >, + pub finalize_relay_map: + RwLock, ViewSyncFinalizeCertificate, V>>, /// Timeout duration for view sync rounds pub view_sync_timeout: Duration, @@ -263,19 +262,19 @@ impl ViewSyncTaskState { event_stream: Sender>>, ) -> Result<()> { match event.as_ref() { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } - HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, &event_stream) .await; } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { tracing::debug!("Received view sync cert for phase {:?}", certificate); let view = certificate.view_number(); self.send_to_or_create_replica(event, view, 
&event_stream) @@ -521,7 +520,7 @@ impl ViewSyncReplicaTaskState { event_stream: Sender>>, ) -> Option { match event.as_ref() { - HotShotEvent::ViewSyncPreCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncPreCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::PreCommit; // Ignore certificate if it is for an older round @@ -535,7 +534,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.failure_threshold(), + self.membership.failure_threshold(self.cur_epoch), &self.upgrade_lock, ) .await @@ -607,7 +606,7 @@ impl ViewSyncReplicaTaskState { })); } - HotShotEvent::ViewSyncCommitCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncCommitCertificateRecv(certificate) => { let last_seen_certificate = ViewSyncPhase::Commit; // Ignore certificate if it is for an older round @@ -621,7 +620,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(), + self.membership.success_threshold(self.cur_epoch), &self.upgrade_lock, ) .await @@ -706,7 +705,7 @@ impl ViewSyncReplicaTaskState { })); } - HotShotEvent::ViewSyncFinalizeCertificate2Recv(certificate) => { + HotShotEvent::ViewSyncFinalizeCertificateRecv(certificate) => { // Ignore certificate if it is for an older round if certificate.view_number() < self.next_view { tracing::warn!("We're already in a higher round"); @@ -718,7 +717,7 @@ impl ViewSyncReplicaTaskState { if !certificate .is_valid_cert( self.membership.stake_table(self.cur_epoch), - self.membership.success_threshold(), + self.membership.success_threshold(self.cur_epoch), &self.upgrade_lock, ) .await diff --git a/crates/task-impls/src/vote_collection.rs b/crates/task-impls/src/vote_collection.rs index e0fc96040b..d7ad469746 100644 --- a/crates/task-impls/src/vote_collection.rs +++ b/crates/task-impls/src/vote_collection.rs @@ -17,12 +17,12 @@ use either::Either::{self, Left, Right}; use hotshot_types::{ message::UpgradeLock, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncCommitCertificate2, ViewSyncFinalizeCertificate2, - ViewSyncPreCommitCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, + ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, + DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ @@ -307,7 +307,8 @@ where type QuorumVoteState = VoteCollectionTaskState, QuorumCertificate2, V>; /// Alias for DA vote accumulator -type DaVoteState = VoteCollectionTaskState, DaCertificate, V>; +type DaVoteState = + VoteCollectionTaskState, DaCertificate2, V>; /// Alias for Timeout vote accumulator type TimeoutVoteState = VoteCollectionTaskState, TimeoutCertificate, V>; @@ -318,17 +319,17 @@ type UpgradeVoteState = type ViewSyncPreCommitState = VoteCollectionTaskState< TYPES, ViewSyncPreCommitVote, - ViewSyncPreCommitCertificate2, + ViewSyncPreCommitCertificate, V, >; /// Alias for View Sync Commit vote accumulator type ViewSyncCommitVoteState = - VoteCollectionTaskState, ViewSyncCommitCertificate2, V>; + VoteCollectionTaskState, ViewSyncCommitCertificate, V>; /// Alias for View Sync Finalize vote accumulator type 
ViewSyncFinalizeVoteState = VoteCollectionTaskState< TYPES, ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, V, >; @@ -386,8 +387,8 @@ impl AggregatableVote, UpgradeCertifi } } -impl AggregatableVote, DaCertificate> - for DaVote +impl AggregatableVote, DaCertificate2> + for DaVote2 { fn leader( &self, @@ -397,7 +398,7 @@ impl AggregatableVote, DaCertificate, + certificate: DaCertificate2, key: &TYPES::SignatureKey, ) -> HotShotEvent { HotShotEvent::DacSend(certificate, key.clone()) @@ -423,7 +424,7 @@ impl AggregatableVote, TimeoutCertifi } impl - AggregatableVote, ViewSyncCommitCertificate2> + AggregatableVote, ViewSyncCommitCertificate> for ViewSyncCommitVote { fn leader( @@ -434,15 +435,15 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncCommitCertificate2, + certificate: ViewSyncCommitCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncCommitCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncCommitCertificateSend(certificate, key.clone()) } } impl - AggregatableVote, ViewSyncPreCommitCertificate2> + AggregatableVote, ViewSyncPreCommitCertificate> for ViewSyncPreCommitVote { fn leader( @@ -453,15 +454,15 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncPreCommitCertificate2, + certificate: ViewSyncPreCommitCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncPreCommitCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, key.clone()) } } impl - AggregatableVote, ViewSyncFinalizeCertificate2> + AggregatableVote, ViewSyncFinalizeCertificate> for ViewSyncFinalizeVote { fn leader( @@ -472,10 +473,10 @@ impl membership.leader(self.date().round + self.date().relay, epoch) } fn make_cert_event( - certificate: ViewSyncFinalizeCertificate2, + certificate: ViewSyncFinalizeCertificate, key: &TYPES::SignatureKey, ) -> HotShotEvent { - HotShotEvent::ViewSyncFinalizeCertificate2Send(certificate, key.clone()) + HotShotEvent::ViewSyncFinalizeCertificateSend(certificate, key.clone()) } } @@ -522,14 +523,14 @@ impl } #[async_trait] -impl HandleVoteEvent, DaCertificate> +impl HandleVoteEvent, DaCertificate2> for DaVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::DaVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -562,14 +563,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncPreCommitCertificate2> + HandleVoteEvent, ViewSyncPreCommitCertificate> for ViewSyncPreCommitState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncPreCommitVoteRecv(vote) => { self.accumulate_vote(vote, sender).await @@ -584,14 +585,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncCommitCertificate2> + HandleVoteEvent, ViewSyncCommitCertificate> for ViewSyncCommitVoteState { async fn handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncCommitVoteRecv(vote) => self.accumulate_vote(vote, sender).await, _ => Ok(None), @@ -604,14 +605,14 @@ impl #[async_trait] impl - HandleVoteEvent, ViewSyncFinalizeCertificate2> + HandleVoteEvent, ViewSyncFinalizeCertificate> for ViewSyncFinalizeVoteState { async fn 
handle_vote_event( &mut self, event: Arc>, sender: &Sender>>, - ) -> Result>> { + ) -> Result>> { match event.as_ref() { HotShotEvent::ViewSyncFinalizeVoteRecv(vote) => { self.accumulate_vote(vote, sender).await diff --git a/crates/testing/src/helpers.rs b/crates/testing/src/helpers.rs index 6759d17e69..9097f38d02 100644 --- a/crates/testing/src/helpers.rs +++ b/crates/testing/src/helpers.rs @@ -27,8 +27,8 @@ use hotshot_types::{ consensus::ConsensusMetricsValue, data::{Leaf, Leaf2, QuorumProposal, VidDisperse, VidDisperseShare}, message::{GeneralConsensusMessage, Proposal, UpgradeLock}, - simple_certificate::DaCertificate, - simple_vote::{DaData, DaVote, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, + simple_certificate::DaCertificate2, + simple_vote::{DaData2, DaVote2, QuorumData, QuorumVote, SimpleVote, VersionedVoteData}, traits::{ block_contents::vid_commitment, consensus_api::ConsensusApi, @@ -140,7 +140,7 @@ pub async fn build_cert< CERT: Certificate, >( data: DATAType, - membership: &TYPES::Membership, + da_membership: &TYPES::Membership, view: TYPES::View, epoch: TYPES::Epoch, public_key: &TYPES::SignatureKey, @@ -149,7 +149,7 @@ pub async fn build_cert< ) -> CERT { let real_qc_sig = build_assembled_sig::( &data, - membership, + da_membership, view, epoch, upgrade_lock, @@ -215,7 +215,7 @@ pub async fn build_assembled_sig< let real_qc_pp: ::QcParams = ::public_parameter( stake_table.clone(), - U256::from(CERT::threshold(membership)), + U256::from(CERT::threshold(membership, epoch)), ); let total_nodes = stake_table.len(); let signers = bitvec![1; total_nodes]; @@ -364,17 +364,18 @@ pub async fn build_da_certificate( public_key: &TYPES::SignatureKey, private_key: &::PrivateKey, upgrade_lock: &UpgradeLock, -) -> DaCertificate { +) -> DaCertificate2 { let encoded_transactions = TestTransaction::encode(&transactions); let da_payload_commitment = vid_commitment(&encoded_transactions, membership.total_nodes(epoch_number)); - let da_data = DaData { + let da_data = DaData2 { payload_commit: da_payload_commitment, + epoch: epoch_number, }; - build_cert::, DaCertificate>( + build_cert::, DaVote2, DaCertificate2>( da_data, membership, view_number, diff --git a/crates/testing/src/test_runner.rs b/crates/testing/src/test_runner.rs index 414e6e0f4b..ffee9b39e5 100644 --- a/crates/testing/src/test_runner.rs +++ b/crates/testing/src/test_runner.rs @@ -308,7 +308,7 @@ where for node in &mut *nodes { node.handle.shut_down().await; } - tracing::info!("Nodes shtudown"); + tracing::info!("Nodes shutdown"); completion_handle.abort(); diff --git a/crates/testing/src/view_generator.rs b/crates/testing/src/view_generator.rs index 8ff60cf949..e31f98364f 100644 --- a/crates/testing/src/view_generator.rs +++ b/crates/testing/src/view_generator.rs @@ -22,16 +22,17 @@ use hotshot_example_types::{ }; use hotshot_types::{ data::{ - DaProposal, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, + DaProposal2, EpochNumber, Leaf, Leaf2, QuorumProposal2, VidDisperse, VidDisperseShare, ViewChangeEvidence, ViewNumber, }, + drb::{INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ - DaCertificate, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, - UpgradeCertificate, ViewSyncFinalizeCertificate2, + DaCertificate2, QuorumCertificate, QuorumCertificate2, TimeoutCertificate, + UpgradeCertificate, ViewSyncFinalizeCertificate, }, simple_vote::{ - DaData, DaVote, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, 
+ DaData2, DaVote2, QuorumData2, QuorumVote2, TimeoutData, TimeoutVote, UpgradeProposalData, UpgradeVote, ViewSyncFinalizeData, ViewSyncFinalizeVote, }, traits::{ @@ -39,6 +40,7 @@ use hotshot_types::{ node_implementation::{ConsensusTime, NodeType}, BlockPayload, }, + utils::epoch_from_block_number, }; use rand::{thread_rng, Rng}; use sha2::{Digest, Sha256}; @@ -49,7 +51,7 @@ use crate::helpers::{ #[derive(Clone)] pub struct TestView { - pub da_proposal: Proposal>, + pub da_proposal: Proposal>, pub quorum_proposal: Proposal>, pub leaf: Leaf2, pub view_number: ViewNumber, @@ -61,7 +63,7 @@ pub struct TestView { ::SignatureKey, ), pub leader_public_key: ::SignatureKey, - pub da_certificate: DaCertificate, + pub da_certificate: DaCertificate2, pub transactions: Vec, upgrade_data: Option>, formed_upgrade_certificate: Option>, @@ -73,7 +75,7 @@ pub struct TestView { impl TestView { pub async fn genesis(membership: &::Membership) -> Self { let genesis_view = ViewNumber::new(1); - let genesis_epoch = EpochNumber::new(1); + let genesis_epoch = EpochNumber::new(0); let upgrade_lock = UpgradeLock::new(); let transactions = Vec::new(); @@ -141,8 +143,8 @@ impl TestView { .to_qc2(), upgrade_certificate: None, view_change_evidence: None, - drb_result: [0; 32], - drb_seed: [0; 96], + drb_result: INITIAL_DRB_RESULT, + drb_seed: INITIAL_DRB_SEED_INPUT, }; let encoded_transactions = Arc::from(TestTransaction::encode(&transactions)); @@ -151,10 +153,11 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: encoded_transactions.clone(), metadata, view_number: genesis_view, + epoch_number: genesis_epoch, }; let da_proposal = Proposal { @@ -216,6 +219,7 @@ impl TestView { let quorum_data = QuorumData2 { leaf_commit: old.leaf.commit(), + epoch: EpochNumber::new(0), }; let (old_private_key, old_public_key) = key_pair_for_id::(*old_view); @@ -305,7 +309,7 @@ impl TestView { TestVersions, ViewSyncFinalizeData, ViewSyncFinalizeVote, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, >( data.clone(), membership, @@ -368,8 +372,8 @@ impl TestView { justify_qc: quorum_certificate.clone(), upgrade_certificate: upgrade_certificate.clone(), view_change_evidence, - drb_result: [0; 32], - drb_seed: [0; 96], + drb_result: INITIAL_DRB_RESULT, + drb_seed: INITIAL_DRB_SEED_INPUT, }; let mut leaf = Leaf2::from_quorum_proposal(&proposal); @@ -392,10 +396,11 @@ impl TestView { ::SignatureKey::sign(&private_key, &encoded_transactions_hash) .expect("Failed to sign block payload"); - let da_proposal_inner = DaProposal:: { + let da_proposal_inner = DaProposal2:: { encoded_transactions: encoded_transactions.clone(), metadata, view_number: next_view, + epoch_number: self.epoch_number, }; let da_proposal = Proposal { @@ -441,6 +446,10 @@ impl TestView { QuorumVote2::::create_signed_vote( QuorumData2 { leaf_commit: self.leaf.commit(), + epoch: EpochNumber::new(epoch_from_block_number( + self.leaf.height(), + handle.hotshot.config.epoch_height, + )), }, self.view_number, &handle.public_key(), @@ -469,10 +478,10 @@ impl TestView { pub async fn create_da_vote( &self, - data: DaData, + data: DaData2, handle: &SystemContextHandle, - ) -> DaVote { - DaVote::create_signed_vote( + ) -> DaVote2 { + DaVote2::create_signed_vote( data, self.view_number, &handle.public_key(), diff --git a/crates/testing/src/view_sync_task.rs b/crates/testing/src/view_sync_task.rs index 
733164d341..914c8279cd 100644 --- a/crates/testing/src/view_sync_task.rs +++ b/crates/testing/src/view_sync_task.rs @@ -48,12 +48,12 @@ impl> TestTaskState | HotShotEvent::ViewSyncPreCommitVoteSend(_) | HotShotEvent::ViewSyncCommitVoteSend(_) | HotShotEvent::ViewSyncFinalizeVoteSend(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncCommitCertificate2Recv(_) - | HotShotEvent::ViewSyncFinalizeCertificate2Recv(_) - | HotShotEvent::ViewSyncPreCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncCommitCertificate2Send(_, _) - | HotShotEvent::ViewSyncFinalizeCertificate2Send(_, _) + | HotShotEvent::ViewSyncPreCommitCertificateRecv(_) + | HotShotEvent::ViewSyncCommitCertificateRecv(_) + | HotShotEvent::ViewSyncFinalizeCertificateRecv(_) + | HotShotEvent::ViewSyncPreCommitCertificateSend(_, _) + | HotShotEvent::ViewSyncCommitCertificateSend(_, _) + | HotShotEvent::ViewSyncFinalizeCertificateSend(_, _) | HotShotEvent::ViewSyncTrigger(_) => { self.hit_view_sync.insert(id); } diff --git a/crates/testing/tests/tests_1/block_builder.rs b/crates/testing/tests/tests_1/block_builder.rs index 5b0a6cf5c2..fc29b1c01d 100644 --- a/crates/testing/tests/tests_1/block_builder.rs +++ b/crates/testing/tests/tests_1/block_builder.rs @@ -12,7 +12,7 @@ use std::{ use hotshot_builder_api::v0_1::block_info::AvailableBlockData; use hotshot_example_types::{ block_types::{TestBlockPayload, TestMetadata, TestTransaction}, - node_types::{TestTypes, TestVersions}, + node_types::TestTypes, }; use hotshot_task_impls::builder::{BuilderClient, BuilderClientError}; use hotshot_testing::block_builder::{ @@ -21,14 +21,13 @@ use hotshot_testing::block_builder::{ use hotshot_types::{ network::RandomBuilderConfig, traits::{ - block_contents::vid_commitment, - node_implementation::{NodeType, Versions}, - signature_key::SignatureKey, + block_contents::vid_commitment, node_implementation::NodeType, signature_key::SignatureKey, BlockPayload, }, }; use tide_disco::Url; use tokio::time::sleep; +use vbs::version::StaticVersion; #[cfg(test)] #[tokio::test(flavor = "multi_thread")] @@ -50,8 +49,7 @@ async fn test_random_block_builder() { let builder_started = Instant::now(); - let client: BuilderClient::Base> = - BuilderClient::new(api_url); + let client: BuilderClient> = BuilderClient::new(api_url); assert!(client.connect(Duration::from_millis(100)).await); let (pub_key, private_key) = diff --git a/crates/testing/tests/tests_1/da_task.rs b/crates/testing/tests/tests_1/da_task.rs index 6f4c2a38df..e672ff24e3 100644 --- a/crates/testing/tests/tests_1/da_task.rs +++ b/crates/testing/tests/tests_1/da_task.rs @@ -23,7 +23,7 @@ use hotshot_testing::{ }; use hotshot_types::{ data::{null_block, EpochNumber, PackedBundle, ViewNumber}, - simple_vote::DaData, + simple_vote::DaData2, traits::{ block_contents::precompute_vid_commitment, election::Membership, @@ -63,8 +63,14 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -76,8 +82,14 @@ async fn test_da_task() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: 
EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -85,14 +97,15 @@ async fn test_da_task() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(1)), - ViewChange(ViewNumber::new(2), EpochNumber::new(1)), + ViewChange(ViewNumber::new(1), EpochNumber::new(0)), + ViewChange(ViewNumber::new(2), EpochNumber::new(0)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, @@ -158,8 +171,14 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -171,8 +190,14 @@ async fn test_da_task_storage_failure() { proposals.push(view.da_proposal.clone()); leaders.push(view.leader_public_key); votes.push( - view.create_da_vote(DaData { payload_commit }, &handle) - .await, + view.create_da_vote( + DaData2 { + payload_commit, + epoch: EpochNumber::new(0), + }, + &handle, + ) + .await, ); dacs.push(view.da_certificate.clone()); vids.push(view.vid_proposal.clone()); @@ -180,14 +205,15 @@ async fn test_da_task_storage_failure() { let inputs = vec![ serial![ - ViewChange(ViewNumber::new(1), EpochNumber::new(1)), - ViewChange(ViewNumber::new(2), EpochNumber::new(1)), + ViewChange(ViewNumber::new(1), EpochNumber::new(0)), + ViewChange(ViewNumber::new(2), EpochNumber::new(0)), BlockRecv(PackedBundle::new( encoded_transactions.clone(), TestMetadata { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, diff --git a/crates/testing/tests/tests_1/message.rs b/crates/testing/tests/tests_1/message.rs index c8869bc3a5..9536cf0f22 100644 --- a/crates/testing/tests/tests_1/message.rs +++ b/crates/testing/tests/tests_1/message.rs @@ -66,8 +66,7 @@ async fn test_certificate2_validity() { use hotshot_testing::{helpers::build_system_handle, view_generator::TestViewGenerator}; use hotshot_types::{ data::{EpochNumber, Leaf, Leaf2}, - traits::election::Membership, - traits::node_implementation::ConsensusTime, + traits::{election::Membership, node_implementation::ConsensusTime}, vote::Certificate, }; @@ -105,7 +104,7 @@ async fn test_certificate2_validity() { assert!( qc.is_valid_cert( membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(), + membership.success_threshold(EpochNumber::new(0)), &handle.hotshot.upgrade_lock ) .await @@ -114,7 +113,7 @@ async fn test_certificate2_validity() { assert!( qc2.is_valid_cert( membership.stake_table(EpochNumber::new(0)), - membership.success_threshold(), + membership.success_threshold(EpochNumber::new(0)), &handle.hotshot.upgrade_lock ) .await diff --git a/crates/testing/tests/tests_1/quorum_proposal_task.rs b/crates/testing/tests/tests_1/quorum_proposal_task.rs index 0124e10b5a..25e70fc6c2 100644 --- a/crates/testing/tests/tests_1/quorum_proposal_task.rs +++ b/crates/testing/tests/tests_1/quorum_proposal_task.rs @@ -440,7 +440,7 @@ async fn test_quorum_proposal_task_view_sync() { 
}; let inputs = vec![random![ - ViewSyncFinalizeCertificate2Recv(cert.clone()), + ViewSyncFinalizeCertificateRecv(cert.clone()), SendPayloadCommitmentAndMetadata( payload_commitment, builder_commitment, diff --git a/crates/testing/tests/tests_1/test_success.rs b/crates/testing/tests/tests_1/test_success.rs index e81060aedb..982b7018f6 100644 --- a/crates/testing/tests/tests_1/test_success.rs +++ b/crates/testing/tests/tests_1/test_success.rs @@ -41,6 +41,25 @@ cross_tests!( }, ); +// cross_tests!( +// TestName: test_epoch_success, +// Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], +// Types: [TestTypes, TestTypesRandomizedLeader, TestTypesRandomizedCommitteeMembers>, TestTypesRandomizedCommitteeMembers>], +// Versions: [EpochsTestVersions], +// Ignore: false, +// Metadata: { +// TestDescription { +// // allow more time to pass in CI +// completion_task_description: CompletionTaskDescription::TimeBasedCompletionTaskBuilder( +// TimeBasedCompletionTaskDescription { +// duration: Duration::from_secs(60), +// }, +// ), +// ..TestDescription::default() +// } +// }, +// ); + cross_tests!( TestName: test_success_with_async_delay, Impls: [MemoryImpl, Libp2pImpl, PushCdnImpl], diff --git a/crates/testing/tests/tests_1/transaction_task.rs b/crates/testing/tests/tests_1/transaction_task.rs index 43773e63da..e4ed70be64 100644 --- a/crates/testing/tests/tests_1/transaction_task.rs +++ b/crates/testing/tests/tests_1/transaction_task.rs @@ -52,6 +52,7 @@ async fn test_transaction_task_leader_two_views_in_a_row() { num_transactions: 0, }, current_view, + EpochNumber::new(1), vec1::vec1![ null_block::builder_fee::( handle.hotshot.memberships.total_nodes(EpochNumber::new(0)), diff --git a/crates/testing/tests/tests_1/vid_task.rs b/crates/testing/tests/tests_1/vid_task.rs index 81f9ac2999..3bf19bbc38 100644 --- a/crates/testing/tests/tests_1/vid_task.rs +++ b/crates/testing/tests/tests_1/vid_task.rs @@ -108,6 +108,7 @@ async fn test_vid_task() { num_transactions: transactions.len() as u64 }, ViewNumber::new(2), + EpochNumber::new(0), vec1::vec1![null_block::builder_fee::( membership.total_nodes(EpochNumber::new(0)), ::Base::VERSION, diff --git a/crates/testing/tests/tests_1/vote_dependency_handle.rs b/crates/testing/tests/tests_1/vote_dependency_handle.rs index 9de2531361..1b12e0b0f0 100644 --- a/crates/testing/tests/tests_1/vote_dependency_handle.rs +++ b/crates/testing/tests/tests_1/vote_dependency_handle.rs @@ -88,6 +88,7 @@ async fn test_vote_dependency_handle() { public_key: handle.public_key(), private_key: handle.private_key().clone(), consensus: OuterConsensus::new(consensus.clone()), + consensus_metrics: Arc::clone(&consensus.read().await.metrics), instance_state: handle.hotshot.instance_state(), quorum_membership: (*handle.hotshot.memberships).clone().into(), storage: Arc::clone(&handle.storage()), diff --git a/crates/types/src/consensus.rs b/crates/types/src/consensus.rs index 81250b7b5b..0c2e8d936f 100644 --- a/crates/types/src/consensus.rs +++ b/crates/types/src/consensus.rs @@ -25,7 +25,7 @@ use crate::{ error::HotShotError, event::{HotShotAction, LeafInfo}, message::Proposal, - simple_certificate::{DaCertificate, QuorumCertificate2}, + simple_certificate::{DaCertificate2, QuorumCertificate2}, traits::{ block_contents::BuilderFee, metrics::{Counter, Gauge, Histogram, Metrics, NoMetrics}, @@ -281,7 +281,7 @@ pub struct Consensus { /// All the DA certs we've received for current and future views. 
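    /// With epochs enabled these are `DaCertificate2`s, whose `DaData2` carries the epoch alongside the payload commitment.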
/// view -> DA cert - saved_da_certs: HashMap>, + saved_da_certs: HashMap>, /// View number that is currently on. cur_view: TYPES::View, @@ -331,6 +331,8 @@ pub struct ConsensusMetricsValue { pub last_synced_block_height: Box, /// The number of last decided view pub last_decided_view: Box, + /// The number of the last voted view + pub last_voted_view: Box, /// Number of timestamp for the last decided time pub last_decided_time: Box, /// The current view @@ -365,6 +367,7 @@ impl ConsensusMetricsValue { last_synced_block_height: metrics .create_gauge(String::from("last_synced_block_height"), None), last_decided_view: metrics.create_gauge(String::from("last_decided_view"), None), + last_voted_view: metrics.create_gauge(String::from("last_voted_view"), None), last_decided_time: metrics.create_gauge(String::from("last_decided_time"), None), current_view: metrics.create_gauge(String::from("current_view"), None), number_of_views_since_last_decide: metrics @@ -476,7 +479,7 @@ impl Consensus { } /// Get the saved DA certs. - pub fn saved_da_certs(&self) -> &HashMap> { + pub fn saved_da_certs(&self) -> &HashMap> { &self.saved_da_certs } @@ -744,7 +747,7 @@ impl Consensus { } /// Add a new entry to the da_certs map. - pub fn update_saved_da_certs(&mut self, view_number: TYPES::View, cert: DaCertificate) { + pub fn update_saved_da_certs(&mut self, view_number: TYPES::View, cert: DaCertificate2) { self.saved_da_certs.insert(view_number, cert); } diff --git a/crates/types/src/data.rs b/crates/types/src/data.rs index 39a608f2f0..b6ec66f16a 100644 --- a/crates/types/src/data.rs +++ b/crates/types/src/data.rs @@ -30,10 +30,11 @@ use utils::anytrace::*; use vec1::Vec1; use crate::{ + drb::{DrbResult, DrbSeedInput, INITIAL_DRB_RESULT, INITIAL_DRB_SEED_INPUT}, message::{Proposal, UpgradeLock}, simple_certificate::{ QuorumCertificate, QuorumCertificate2, TimeoutCertificate, UpgradeCertificate, - ViewSyncFinalizeCertificate2, + ViewSyncFinalizeCertificate, }, simple_vote::{QuorumData, UpgradeProposalData, VersionedVoteData}, traits::{ @@ -146,6 +147,41 @@ pub struct DaProposal { pub view_number: TYPES::View, } +/// A proposal to start providing data availability for a block. +#[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] +#[serde(bound = "TYPES: NodeType")] +pub struct DaProposal2 { + /// Encoded transactions in the block to be applied. + pub encoded_transactions: Arc<[u8]>, + /// Metadata of the block to be applied. + pub metadata: >::Metadata, + /// View this proposal applies to + pub view_number: TYPES::View, + /// Epoch this proposal applies to + pub epoch_number: TYPES::Epoch, +} + +impl From> for DaProposal2 { + fn from(da_proposal: DaProposal) -> Self { + Self { + encoded_transactions: da_proposal.encoded_transactions, + metadata: da_proposal.metadata, + view_number: da_proposal.view_number, + epoch_number: TYPES::Epoch::new(0), + } + } +} + +impl From> for DaProposal { + fn from(da_proposal2: DaProposal2) -> Self { + Self { + encoded_transactions: da_proposal2.encoded_transactions, + metadata: da_proposal2.metadata, + view_number: da_proposal2.view_number, + } + } +} + /// A proposal to upgrade the network #[derive(derive_more::Debug, Serialize, Deserialize, Clone, Eq, PartialEq, Hash)] #[serde(bound = "TYPES: NodeType")] @@ -238,7 +274,7 @@ pub enum ViewChangeEvidence { /// Holds a timeout certificate. Timeout(TimeoutCertificate), /// Holds a view sync finalized certificate. 
- ViewSync(ViewSyncFinalizeCertificate2), + ViewSync(ViewSyncFinalizeCertificate), } impl ViewChangeEvidence { @@ -391,13 +427,17 @@ pub struct QuorumProposal2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - /// the DRB seed currently being calculated + /// The DRB seed for the next epoch. + /// + /// The DRB computation using this seed was started in the previous epoch. #[serde(with = "serde_bytes")] - pub drb_seed: [u8; 96], + pub drb_seed: DrbSeedInput, - /// the result of the DRB calculation + /// The DRB result for the current epoch. + /// + /// The DRB computation with this result was started two epochs ago. #[serde(with = "serde_bytes")] - pub drb_result: [u8; 32], + pub drb_result: DrbResult, } impl From> for QuorumProposal2 { @@ -408,20 +448,20 @@ impl From> for QuorumProposal2 { justify_qc: quorum_proposal.justify_qc.to_qc2(), upgrade_certificate: quorum_proposal.upgrade_certificate, view_change_evidence: quorum_proposal.proposal_certificate, - drb_seed: [0; 96], - drb_result: [0; 32], + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, } } } impl From> for QuorumProposal { - fn from(quorum_proposal: QuorumProposal2) -> Self { + fn from(quorum_proposal2: QuorumProposal2) -> Self { Self { - block_header: quorum_proposal.block_header, - view_number: quorum_proposal.view_number, - justify_qc: quorum_proposal.justify_qc.to_qc(), - upgrade_certificate: quorum_proposal.upgrade_certificate, - proposal_certificate: quorum_proposal.view_change_evidence, + block_header: quorum_proposal2.block_header, + view_number: quorum_proposal2.view_number, + justify_qc: quorum_proposal2.justify_qc.to_qc(), + upgrade_certificate: quorum_proposal2.upgrade_certificate, + proposal_certificate: quorum_proposal2.view_change_evidence, } } } @@ -438,8 +478,8 @@ impl From> for Leaf2 { upgrade_certificate: leaf.upgrade_certificate, block_payload: leaf.block_payload, view_change_evidence: None, - drb_seed: [0; 96], - drb_result: [0; 32], + drb_seed: INITIAL_DRB_SEED_INPUT, + drb_result: INITIAL_DRB_RESULT, } } } @@ -450,6 +490,12 @@ impl HasViewNumber for DaProposal { } } +impl HasViewNumber for DaProposal2 { + fn view_number(&self) -> TYPES::View { + self.view_number + } +} + impl HasViewNumber for VidDisperse { fn view_number(&self) -> TYPES::View { self.view_number @@ -562,13 +608,17 @@ pub struct Leaf2 { /// Possible timeout or view sync certificate. If the `justify_qc` is not for a proposal in the immediately preceding view, then either a timeout or view sync certificate must be attached. pub view_change_evidence: Option>, - /// the DRB seed currently being calculated + /// The DRB seed for the next epoch. + /// + /// The DRB computation using this seed was started in the previous epoch. #[serde(with = "serde_bytes")] - pub drb_seed: [u8; 96], + pub drb_seed: DrbSeedInput, - /// the result of the DRB calculation + /// The DRB result for the current epoch. + /// + /// The DRB computation with this result was started two epochs ago. 
#[serde(with = "serde_bytes")] - pub drb_result: [u8; 32], + pub drb_result: DrbResult, } impl Leaf2 { @@ -688,7 +738,7 @@ impl Leaf2 { impl Committable for Leaf2 { fn commit(&self) -> committable::Commitment { - if self.drb_seed == [0; 96] && self.drb_result == [0; 32] { + if self.drb_seed == [0; 32] && self.drb_result == [0; 32] { RawCommitmentBuilder::new("leaf commitment") .u64_field("view number", *self.view_number) .field("parent leaf commitment", self.parent_commitment) @@ -1217,6 +1267,9 @@ pub struct PackedBundle { /// The view number that this block is associated with. pub view_number: TYPES::View, + /// The view number that this block is associated with. + pub epoch_number: TYPES::Epoch, + /// The sequencing fee for submitting bundles. pub sequencing_fees: Vec1>, @@ -1233,6 +1286,7 @@ impl PackedBundle { encoded_transactions: Arc<[u8]>, metadata: >::Metadata, view_number: TYPES::View, + epoch_number: TYPES::Epoch, sequencing_fees: Vec1>, vid_precompute: Option, auction_result: Option, @@ -1241,6 +1295,7 @@ impl PackedBundle { encoded_transactions, metadata, view_number, + epoch_number, sequencing_fees, vid_precompute, auction_result, diff --git a/crates/hotshot/src/traits/election/dynamic.rs b/crates/types/src/drb.rs similarity index 83% rename from crates/hotshot/src/traits/election/dynamic.rs rename to crates/types/src/drb.rs index eba6f100ec..1790e9b30f 100644 --- a/crates/hotshot/src/traits/election/dynamic.rs +++ b/crates/types/src/drb.rs @@ -6,9 +6,10 @@ use std::hash::{DefaultHasher, Hash, Hasher}; -use hotshot_types::traits::{node_implementation::NodeType, signature_key::SignatureKey}; use sha2::{Digest, Sha256}; +use crate::traits::{node_implementation::NodeType, signature_key::SignatureKey}; + // TODO: Add the following consts once we bench the hash time. // // /// Highest number of hashes that a hardware can complete in a second. @@ -22,6 +23,17 @@ use sha2::{Digest, Sha256}; /// Arbitrary number of times the hash function will be repeatedly called. const DIFFICULTY_LEVEL: u64 = 10; +/// DRB seed input for epoch 1 and 2. +pub const INITIAL_DRB_SEED_INPUT: [u8; 32] = [0; 32]; +/// DRB result for epoch 1 and 2. +pub const INITIAL_DRB_RESULT: [u8; 32] = [0; 32]; + +/// Alias for DRB seed input for `compute_drb_result`, serialized from the QC signature. +pub type DrbSeedInput = [u8; 32]; + +/// Alias for DRB result from `compute_drb_result`. +pub type DrbResult = [u8; 32]; + // TODO: Use `HASHES_PER_SECOND` * `VIEW_TIMEOUT` * `DRB_CALCULATION_NUM_VIEW` to calculate this // once we bench the hash time. // @@ -40,7 +52,7 @@ pub fn difficulty_level() -> u64 { /// # Arguments /// * `drb_seed_input` - Serialized QC signature. #[must_use] -pub fn compute_drb_result(drb_seed_input: [u8; 32]) -> [u8; 32] { +pub fn compute_drb_result(drb_seed_input: DrbSeedInput) -> DrbResult { let mut hash = drb_seed_input.to_vec(); for _iter in 0..DIFFICULTY_LEVEL { // TODO: This may be optimized to avoid memcopies after we bench the hash time. 
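// A minimal usage sketch (illustrative only, not part of this change): `compute_drb_result`
// simply re-hashes the 32-byte seed with SHA-256 `DIFFICULTY_LEVEL` times. `ExampleTypes`
// below is a placeholder for any `NodeType` implementation and the seed bytes are arbitrary;
// in the vote handlers the seed is built from the bincode-serialized QC signatures.
fn drb_usage_sketch<ExampleTypes: NodeType>() -> DrbResult {
    let drb_seed_input: DrbSeedInput = [7u8; 32];
    compute_drb_result::<ExampleTypes>(drb_seed_input)
}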
@@ -61,7 +73,7 @@ pub fn compute_drb_result(drb_seed_input: [u8; 32]) -> [u8; 32] pub fn leader( view_number: usize, stake_table: &[::StakeTableEntry], - drb_result: [u8; 32], + drb_result: DrbResult, ) -> TYPES::SignatureKey { let mut hasher = DefaultHasher::new(); drb_result.hash(&mut hasher); diff --git a/crates/types/src/event.rs b/crates/types/src/event.rs index c4ae586866..0a79913d88 100644 --- a/crates/types/src/event.rs +++ b/crates/types/src/event.rs @@ -11,12 +11,13 @@ use std::sync::Arc; use serde::{Deserialize, Serialize}; use crate::{ - data::{DaProposal, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, + data::{DaProposal2, Leaf2, QuorumProposal2, UpgradeProposal, VidDisperseShare}, error::HotShotError, message::Proposal, simple_certificate::QuorumCertificate2, traits::{node_implementation::NodeType, ValidatedState}, }; + /// A status event emitted by a `HotShot` instance /// /// This includes some metadata, such as the stage and view number that the event was generated in, @@ -151,7 +152,7 @@ pub enum EventType { /// or submitted to the network by us DaProposal { /// Contents of the proposal - proposal: Proposal>, + proposal: Proposal>, /// Public key of the leader submitting the proposal sender: TYPES::SignatureKey, }, diff --git a/crates/types/src/lib.rs b/crates/types/src/lib.rs index 5523b56b28..93076491e1 100644 --- a/crates/types/src/lib.rs +++ b/crates/types/src/lib.rs @@ -20,6 +20,8 @@ pub mod bundle; pub mod consensus; pub mod constants; pub mod data; +/// Holds the types and functions for DRB computation. +pub mod drb; pub mod error; pub mod event; /// Holds the configuration file specification for a HotShot node. diff --git a/crates/types/src/message.rs b/crates/types/src/message.rs index 00ed7d9ac5..062ae9c96f 100644 --- a/crates/types/src/message.rs +++ b/crates/types/src/message.rs @@ -26,16 +26,17 @@ use vbs::{ use crate::{ data::{ - DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, VidDisperseShare, + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, UpgradeProposal, + VidDisperseShare, }, request_response::ProposalRequestPayload, simple_certificate::{ - DaCertificate, QuorumCertificate, UpgradeCertificate, ViewSyncCommitCertificate2, - ViewSyncFinalizeCertificate2, ViewSyncPreCommitCertificate2, + DaCertificate, DaCertificate2, QuorumCertificate2, UpgradeCertificate, + ViewSyncCommitCertificate, ViewSyncFinalizeCertificate, ViewSyncPreCommitCertificate, }, simple_vote::{ - DaVote, QuorumVote, TimeoutVote, UpgradeVote, ViewSyncCommitVote, ViewSyncFinalizeVote, - ViewSyncPreCommitVote, + DaVote, DaVote2, QuorumVote, QuorumVote2, TimeoutVote, UpgradeVote, ViewSyncCommitVote, + ViewSyncFinalizeVote, ViewSyncPreCommitVote, }, traits::{ election::Membership, @@ -183,13 +184,13 @@ pub enum GeneralConsensusMessage { ViewSyncFinalizeVote(ViewSyncFinalizeVote), /// Message with a view sync pre-commit certificate - ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate2), + ViewSyncPreCommitCertificate(ViewSyncPreCommitCertificate), /// Message with a view sync commit certificate - ViewSyncCommitCertificate(ViewSyncCommitCertificate2), + ViewSyncCommitCertificate(ViewSyncCommitCertificate), /// Message with a view sync finalize certificate - ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate2), + ViewSyncFinalizeCertificate(ViewSyncFinalizeCertificate), /// Message with a Timeout vote TimeoutVote(TimeoutVote), @@ -200,6 +201,15 @@ pub enum GeneralConsensusMessage { /// Message with an upgrade vote 
UpgradeVote(UpgradeVote), + /// Message for the next leader containing our highest QC + HighQc(QuorumCertificate2), + + /// Message with a quorum proposal. + Proposal2(Proposal>), + + /// Message with a quorum vote. + Vote2(QuorumVote2), + /// A peer node needs a proposal from the leader. ProposalRequested( ProposalRequestPayload, @@ -209,8 +219,8 @@ pub enum GeneralConsensusMessage { /// A replica has responded with a valid proposal. ProposalResponse(Proposal>), - /// Message for the next leader containing our highest QC - HighQc(QuorumCertificate), + /// A replica has responded with a valid proposal. + Proposal2Response(Proposal>), } #[derive(Deserialize, Serialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -230,6 +240,15 @@ pub enum DaConsensusMessage { /// /// Like [`DaProposal`]. Use `Msg` suffix to distinguish from `VidDisperse`. VidDisperseMsg(Proposal>), + + /// Proposal for data availability committee + DaProposal2(Proposal>), + + /// vote for data availability committee + DaVote2(DaVote2), + + /// Certificate data is available + DaCertificate2(DaCertificate2), } /// Messages for sequencing consensus. @@ -258,6 +277,9 @@ impl SequencingMessage { GeneralConsensusMessage::ProposalResponse(proposal) => { proposal.data.view_number() } + GeneralConsensusMessage::Proposal2Response(proposal) => { + proposal.data.view_number() + } GeneralConsensusMessage::Vote(vote_message) => vote_message.view_number(), GeneralConsensusMessage::TimeoutVote(message) => message.view_number(), GeneralConsensusMessage::ViewSyncPreCommitVote(message) => { @@ -277,6 +299,12 @@ impl SequencingMessage { GeneralConsensusMessage::UpgradeProposal(message) => message.data.view_number(), GeneralConsensusMessage::UpgradeVote(message) => message.view_number(), GeneralConsensusMessage::HighQc(qc) => qc.view_number(), + GeneralConsensusMessage::Proposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.view_number() + } + GeneralConsensusMessage::Vote2(vote_message) => vote_message.view_number(), } } SequencingMessage::Da(da_message) => { @@ -289,6 +317,13 @@ impl SequencingMessage { DaConsensusMessage::DaVote(vote_message) => vote_message.view_number(), DaConsensusMessage::DaCertificate(cert) => cert.view_number, DaConsensusMessage::VidDisperseMsg(disperse) => disperse.data.view_number(), + DaConsensusMessage::DaProposal2(p) => { + // view of leader in the leaf when proposal + // this should match replica upon receipt + p.data.view_number() + } + DaConsensusMessage::DaVote2(vote_message) => vote_message.view_number(), + DaConsensusMessage::DaCertificate2(cert) => cert.view_number, } } } diff --git a/crates/types/src/simple_certificate.rs b/crates/types/src/simple_certificate.rs index 271d5b0729..f88e549ede 100644 --- a/crates/types/src/simple_certificate.rs +++ b/crates/types/src/simple_certificate.rs @@ -24,8 +24,9 @@ use crate::{ data::serialize_signature2, message::UpgradeLock, simple_vote::{ - DaData, QuorumData, QuorumData2, QuorumMaker, TimeoutData, UpgradeProposalData, - VersionedVoteData, ViewSyncCommitData, ViewSyncFinalizeData, ViewSyncPreCommitData, + DaData, DaData2, QuorumData, QuorumData2, QuorumMarker, TimeoutData, TimeoutData2, + UpgradeProposalData, VersionedVoteData, ViewSyncCommitData, ViewSyncCommitData2, + ViewSyncFinalizeData, ViewSyncFinalizeData2, ViewSyncPreCommitData, ViewSyncPreCommitData2, Voteable, }, traits::{ @@ -39,7 +40,10 @@ use crate::{ /// Trait which allows use to inject different threshold calculations into a Certificate type 
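/// Now that stake tables and thresholds are tracked per epoch, `threshold` takes the epoch whose membership should be consulted.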
pub trait Threshold { /// Calculate a threshold based on the membership - fn threshold>(membership: &MEMBERSHIP) -> u64; + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64; } /// Defines a threshold which is 2f + 1 (Amount needed for Quorum) @@ -47,8 +51,11 @@ pub trait Threshold { pub struct SuccessThreshold {} impl Threshold for SuccessThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.success_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.success_threshold(epoch).into() } } @@ -57,8 +64,11 @@ impl Threshold for SuccessThreshold { pub struct OneHonestThreshold {} impl Threshold for OneHonestThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.failure_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.failure_threshold(epoch).into() } } @@ -67,8 +77,11 @@ impl Threshold for OneHonestThreshold { pub struct UpgradeThreshold {} impl Threshold for UpgradeThreshold { - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.upgrade_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.upgrade_threshold(epoch).into() } } @@ -192,8 +205,11 @@ impl> Certificate ) -> usize { membership.da_total_nodes(epoch) } - fn threshold>(membership: &MEMBERSHIP) -> u64 { - membership.da_success_threshold().into() + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + membership.da_success_threshold(epoch).into() } fn data(&self) -> &Self::Voteable { &self.data @@ -210,7 +226,95 @@ impl> Certificate } } -impl> +impl> Certificate> + for SimpleCertificate, THRESHOLD> +{ + type Voteable = DaData2; + type Threshold = THRESHOLD; + + fn create_signed_certificate( + vote_commitment: Commitment, V>>, + data: Self::Voteable, + sig: ::QcType, + view: TYPES::View, + ) -> Self { + let vote_commitment_bytes: [u8; 32] = vote_commitment.into(); + + SimpleCertificate { + data, + vote_commitment: Commitment::from_raw(vote_commitment_bytes), + view_number: view, + signatures: Some(sig), + _pd: PhantomData, + } + } + async fn is_valid_cert( + &self, + stake_table: Vec<::StakeTableEntry>, + threshold: NonZeroU64, + upgrade_lock: &UpgradeLock, + ) -> bool { + if self.view_number == TYPES::View::genesis() { + return true; + } + let real_qc_pp = ::public_parameter( + stake_table, + U256::from(u64::from(threshold)), + ); + let Ok(commit) = self.data_commitment(upgrade_lock).await else { + return false; + }; + ::check( + &real_qc_pp, + commit.as_ref(), + self.signatures.as_ref().unwrap(), + ) + } + /// Proxy's to `Membership.stake` + fn stake_table_entry>( + membership: &MEMBERSHIP, + pub_key: &TYPES::SignatureKey, + epoch: TYPES::Epoch, + ) -> Option<::StakeTableEntry> { + membership.da_stake(pub_key, epoch) + } + + /// Proxy's to `Membership.da_stake_table` + fn stake_table>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> Vec<::StakeTableEntry> { + membership.da_stake_table(epoch) + } + /// Proxy's to `Membership.da_total_nodes` + fn total_nodes>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> usize { + membership.da_total_nodes(epoch) + } + fn threshold>( + membership: &MEMBERSHIP, + epoch: TYPES::Epoch, + ) -> u64 { + membership.da_success_threshold(epoch).into() + } + fn data(&self) -> &Self::Voteable { + &self.data + } + async fn data_commitment( + &self, + upgrade_lock: &UpgradeLock, + ) -> Result, V>>> { + Ok( + 
VersionedVoteData::new(self.data.clone(), self.view_number, upgrade_lock) + .await? + .commit(), + ) + } +} + +impl> Certificate for SimpleCertificate { type Voteable = VOTEABLE; @@ -254,8 +358,11 @@ impl>(membership: &MEMBERSHIP) -> u64 { - THRESHOLD::threshold(membership) + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64 { + THRESHOLD::threshold(membership, epoch) } fn stake_table_entry>( @@ -345,7 +452,7 @@ impl UpgradeCertificate { ensure!( cert.is_valid_cert( quorum_membership.stake_table(epoch), - quorum_membership.upgrade_threshold(), + quorum_membership.upgrade_threshold(epoch), upgrade_lock ) .await, @@ -370,6 +477,7 @@ impl QuorumCertificate { let bytes: [u8; 32] = self.data.leaf_commit.into(); let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), + epoch: TYPES::Epoch::new(0), }; let bytes: [u8; 32] = self.vote_commitment.into(); @@ -406,23 +514,248 @@ impl QuorumCertificate2 { } } +impl DaCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_dac2(self) -> DaCertificate2 { + let data = DaData2 { + payload_commit: self.data.payload_commit, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl DaCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_dac(self) -> DaCertificate { + let data = DaData { + payload_commit: self.data.payload_commit, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncPreCommitCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncPreCommitCertificate2 { + let data = ViewSyncPreCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncPreCommitCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> ViewSyncPreCommitCertificate { + let data = ViewSyncPreCommitData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncCommitCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncCommitCertificate2 { + let data = ViewSyncCommitData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncCommitCertificate2 { + /// Convert a `DaCertificate` into a 
`DaCertificate2` + pub fn to_vsc(self) -> ViewSyncCommitCertificate { + let data = ViewSyncCommitData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncFinalizeCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> ViewSyncFinalizeCertificate2 { + let data = ViewSyncFinalizeData2 { + relay: self.data.relay, + round: self.data.round, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl ViewSyncFinalizeCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> ViewSyncFinalizeCertificate { + let data = ViewSyncFinalizeData { + relay: self.data.relay, + round: self.data.round, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl TimeoutCertificate { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc2(self) -> TimeoutCertificate2 { + let data = TimeoutData2 { + view: self.data.view, + epoch: TYPES::Epoch::new(0), + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + +impl TimeoutCertificate2 { + /// Convert a `DaCertificate` into a `DaCertificate2` + pub fn to_vsc(self) -> TimeoutCertificate { + let data = TimeoutData { + view: self.data.view, + }; + + let bytes: [u8; 32] = self.vote_commitment.into(); + let vote_commitment = Commitment::from_raw(bytes); + + SimpleCertificate { + data, + vote_commitment, + view_number: self.view_number, + signatures: self.signatures.clone(), + _pd: PhantomData, + } + } +} + /// Type alias for a `QuorumCertificate`, which is a `SimpleCertificate` over `QuorumData` pub type QuorumCertificate = SimpleCertificate, SuccessThreshold>; /// Type alias for a `QuorumCertificate2`, which is a `SimpleCertificate` over `QuorumData2` pub type QuorumCertificate2 = SimpleCertificate, SuccessThreshold>; -/// Type alias for a DA certificate over `DaData` +/// Type alias for a `DaCertificate`, which is a `SimpleCertificate` over `DaData` pub type DaCertificate = SimpleCertificate; +/// Type alias for a `DaCertificate2`, which is a `SimpleCertificate` over `DaData2` +pub type DaCertificate2 = SimpleCertificate, SuccessThreshold>; /// Type alias for a Timeout certificate over a view number pub type TimeoutCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `TimeoutCertificate2`, which is a `SimpleCertificate` over `TimeoutData2` +pub type TimeoutCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncPreCommit` certificate over a view number -pub type ViewSyncPreCommitCertificate2 = +pub type ViewSyncPreCommitCertificate = SimpleCertificate, OneHonestThreshold>; +/// Type 
alias for a `ViewSyncPreCommitCertificate2`, which is a `SimpleCertificate` over `ViewSyncPreCommitData2` +pub type ViewSyncPreCommitCertificate2 = + SimpleCertificate, OneHonestThreshold>; /// Type alias for a `ViewSyncCommit` certificate over a view number -pub type ViewSyncCommitCertificate2 = +pub type ViewSyncCommitCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncCommitCertificate2`, which is a `SimpleCertificate` over `ViewSyncCommitData2` +pub type ViewSyncCommitCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `ViewSyncFinalize` certificate over a view number -pub type ViewSyncFinalizeCertificate2 = +pub type ViewSyncFinalizeCertificate = SimpleCertificate, SuccessThreshold>; +/// Type alias for a `ViewSyncFinalizeCertificate2`, which is a `SimpleCertificate` over `ViewSyncFinalizeData2` +pub type ViewSyncFinalizeCertificate2 = + SimpleCertificate, SuccessThreshold>; /// Type alias for a `UpgradeCertificate`, which is a `SimpleCertificate` of `UpgradeProposalData` pub type UpgradeCertificate = SimpleCertificate, UpgradeThreshold>; diff --git a/crates/types/src/simple_vote.rs b/crates/types/src/simple_vote.rs index 5596956f7c..a682ba413b 100644 --- a/crates/types/src/simple_vote.rs +++ b/crates/types/src/simple_vote.rs @@ -17,7 +17,7 @@ use crate::{ data::{Leaf, Leaf2}, message::UpgradeLock, traits::{ - node_implementation::{NodeType, Versions}, + node_implementation::{ConsensusTime, NodeType, Versions}, signature_key::SignatureKey, }, vid::VidCommitment, @@ -25,7 +25,7 @@ use crate::{ }; /// Marker that data should use the quorum cert type -pub(crate) trait QuorumMaker {} +pub(crate) trait QuorumMarker {} #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a yes vote. @@ -40,6 +40,8 @@ pub struct QuorumData { pub struct QuorumData2 { /// Commitment to the leaf pub leaf_commit: Commitment>, + /// Epoch number + pub epoch: TYPES::Epoch, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a DA vote. @@ -48,12 +50,27 @@ pub struct DaData { pub payload_commit: VidCommitment, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a DA vote. +pub struct DaData2 { + /// Commitment to a block payload + pub payload_commit: VidCommitment, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a timeout vote. pub struct TimeoutData { /// View the timeout is for pub view: TYPES::View, } - +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a timeout vote. +pub struct TimeoutData2 { + /// View the timeout is for + pub view: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Pre Commit vote. pub struct ViewSyncPreCommitData { @@ -63,6 +80,16 @@ pub struct ViewSyncPreCommitData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Pre Commit vote. +pub struct ViewSyncPreCommitData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Commit vote. 
pub struct ViewSyncCommitData { /// The relay this vote is intended for @@ -71,6 +98,16 @@ pub struct ViewSyncCommitData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Commit vote. +pub struct ViewSyncCommitData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Finalize vote. pub struct ViewSyncFinalizeData { /// The relay this vote is intended for @@ -79,6 +116,16 @@ pub struct ViewSyncFinalizeData { pub round: TYPES::View, } #[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] +/// Data used for a Finalize vote. +pub struct ViewSyncFinalizeData2 { + /// The relay this vote is intended for + pub relay: u64, + /// The view number we are trying to sync on + pub round: TYPES::View, + /// Epoch number + pub epoch: TYPES::Epoch, +} +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] /// Data used for a Upgrade vote. pub struct UpgradeProposalData { /// The old version that we are upgrading from. @@ -96,6 +143,18 @@ pub struct UpgradeProposalData { pub new_version_first_view: TYPES::View, } +/// Data used for an upgrade once epochs are implemented +pub struct UpgradeData2 { + /// The old version that we are upgrading from + pub old_version: Version, + /// The new version that we are upgrading to + pub new_version: Version, + /// A unique identifier for the specific protocol being voted on + pub hash: Vec, + /// The first epoch in which the upgrade will be in effect + pub epoch: TYPES::Epoch, +} + /// Marker trait for data or commitments that can be voted on. /// Only structs in this file can implement voteable. This is enforced with the `Sealed` trait /// Sealing this trait prevents creating new vote types outside this file. @@ -117,13 +176,17 @@ mod sealed { impl Sealed for C {} } -impl QuorumMaker for QuorumData {} -impl QuorumMaker for QuorumData2 {} -impl QuorumMaker for TimeoutData {} -impl QuorumMaker for ViewSyncPreCommitData {} -impl QuorumMaker for ViewSyncCommitData {} -impl QuorumMaker for ViewSyncFinalizeData {} -impl QuorumMaker for UpgradeProposalData {} +impl QuorumMarker for QuorumData {} +impl QuorumMarker for QuorumData2 {} +impl QuorumMarker for TimeoutData {} +impl QuorumMarker for TimeoutData2 {} +impl QuorumMarker for ViewSyncPreCommitData {} +impl QuorumMarker for ViewSyncCommitData {} +impl QuorumMarker for ViewSyncFinalizeData {} +impl QuorumMarker for ViewSyncPreCommitData2 {} +impl QuorumMarker for ViewSyncCommitData2 {} +impl QuorumMarker for ViewSyncFinalizeData2 {} +impl QuorumMarker for UpgradeProposalData {} /// A simple yes vote over some votable type. 
#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Hash, Eq)] @@ -272,9 +335,18 @@ impl Committable for QuorumData { impl Committable for QuorumData2 { fn commit(&self) -> Commitment { - committable::RawCommitmentBuilder::new("Quorum data") - .var_size_bytes(self.leaf_commit.as_ref()) - .finalize() + let QuorumData2 { leaf_commit, epoch } = self; + + if **epoch == 0 { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()) + .finalize() + } else { + committable::RawCommitmentBuilder::new("Quorum data") + .var_size_bytes(leaf_commit.as_ref()) + .u64(**epoch) + .finalize() + } } } @@ -286,6 +358,23 @@ impl Committable for TimeoutData { } } +impl Committable for TimeoutData2 { + fn commit(&self) -> Commitment { + let TimeoutData2 { view, epoch } = self; + + if **epoch == 0 { + committable::RawCommitmentBuilder::new("Timeout data") + .u64(**view) + .finalize() + } else { + committable::RawCommitmentBuilder::new("Timeout data") + .u64(**view) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for DaData { fn commit(&self) -> Commitment { committable::RawCommitmentBuilder::new("DA data") @@ -294,6 +383,25 @@ impl Committable for DaData { } } +impl Committable for DaData2 { + fn commit(&self) -> Commitment { + let DaData2 { + payload_commit, + epoch, + } = self; + if **epoch == 0 { + committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()) + .finalize() + } else { + committable::RawCommitmentBuilder::new("DA data") + .var_size_bytes(payload_commit.as_ref()) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for UpgradeProposalData { fn commit(&self) -> Commitment { let builder = committable::RawCommitmentBuilder::new("Upgrade data"); @@ -310,6 +418,26 @@ impl Committable for UpgradeProposalData { } } +impl Committable for UpgradeData2 { + fn commit(&self) -> Commitment { + let UpgradeData2 { + old_version, + new_version, + hash, + epoch, + } = self; + + committable::RawCommitmentBuilder::new("Upgrade data") + .u16(old_version.minor) + .u16(old_version.major) + .u16(new_version.minor) + .u16(new_version.major) + .var_size_bytes(hash.as_slice()) + .u64(**epoch) + .finalize() + } +} + /// This implements commit for all the types which contain a view and relay public key. 
fn view_and_relay_commit( view: TYPES::View, @@ -326,18 +454,83 @@ impl Committable for ViewSyncPreCommitData { } } +impl Committable for ViewSyncPreCommitData2 { + fn commit(&self) -> Commitment { + let ViewSyncPreCommitData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Precommit") + } else { + committable::RawCommitmentBuilder::new("View Sync Precommit") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for ViewSyncFinalizeData { fn commit(&self) -> Commitment { view_and_relay_commit::(self.round, self.relay, "View Sync Finalize") } } + +impl Committable for ViewSyncFinalizeData2 { + fn commit(&self) -> Commitment { + let ViewSyncFinalizeData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Finalize") + } else { + committable::RawCommitmentBuilder::new("View Sync Finalize") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + impl Committable for ViewSyncCommitData { fn commit(&self) -> Commitment { view_and_relay_commit::(self.round, self.relay, "View Sync Commit") } } + // impl votable for all the data types in this file sealed marker should ensure nothing is accidentally + +impl Committable for ViewSyncCommitData2 { + fn commit(&self) -> Commitment { + let ViewSyncCommitData2 { + relay, + round, + epoch, + } = self; + + if **epoch == 0 { + view_and_relay_commit::(*round, *relay, "View Sync Commit") + } else { + committable::RawCommitmentBuilder::new("View Sync Commit") + .u64(*relay) + .u64(**round) + .u64(**epoch) + .finalize() + } + } +} + +// impl votable for all the data types in this file sealed marker should ensure nothing is accidently + // implemented for structs that aren't "voteable" impl Voteable for V @@ -352,6 +545,7 @@ impl QuorumVote { let signature = self.signature; let data = QuorumData2 { leaf_commit: Commitment::from_raw(bytes), + epoch: TYPES::Epoch::new(0), }; let view_number = self.view_number; @@ -382,21 +576,73 @@ impl QuorumVote2 { } } +impl DaVote { + /// Convert a `QuorumVote` to a `QuorumVote2` + pub fn to_vote2(self) -> DaVote2 { + let signature = self.signature; + let data = DaData2 { + payload_commit: self.data.payload_commit, + epoch: TYPES::Epoch::new(0), + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + +impl DaVote2 { + /// Convert a `QuorumVote2` to a `QuorumVote` + pub fn to_vote(self) -> DaVote { + let signature = self.signature; + let data = DaData { + payload_commit: self.data.payload_commit, + }; + let view_number = self.view_number; + + SimpleVote { + signature, + data, + view_number, + } + } +} + // Type aliases for simple use of all the main votes. We should never see `SimpleVote` outside this file + /// Quorum vote Alias pub type QuorumVote = SimpleVote>; // Type aliases for simple use of all the main votes. 
We should never see `SimpleVote` outside this file /// Quorum vote Alias pub type QuorumVote2 = SimpleVote>; + /// DA vote type alias pub type DaVote = SimpleVote; +/// DA vote 2 type alias +pub type DaVote2 = SimpleVote>; + /// Timeout Vote type alias pub type TimeoutVote = SimpleVote>; -/// View Sync Commit Vote type alias -pub type ViewSyncCommitVote = SimpleVote>; +/// Timeout Vote 2 type alias +pub type TimeoutVote2 = SimpleVote>; + /// View Sync Pre Commit Vote type alias pub type ViewSyncPreCommitVote = SimpleVote>; +/// View Sync Pre Commit Vote 2 type alias +pub type ViewSyncPreCommitVote2 = SimpleVote>; /// View Sync Finalize Vote type alias pub type ViewSyncFinalizeVote = SimpleVote>; +/// View Sync Finalize Vote 2 type alias +pub type ViewSyncFinalizeVote2 = SimpleVote>; +/// View Sync Commit Vote type alias +pub type ViewSyncCommitVote = SimpleVote>; +/// View Sync Commit Vote 2 type alias +pub type ViewSyncCommitVote2 = SimpleVote>; + /// Upgrade proposal vote pub type UpgradeVote = SimpleVote>; +/// Upgrade proposal 2 vote +pub type UpgradeVote2 = SimpleVote>; diff --git a/crates/types/src/traits/election.rs b/crates/types/src/traits/election.rs index 04aa76ccb4..5b72ea4f84 100644 --- a/crates/types/src/traits/election.rs +++ b/crates/types/src/traits/election.rs @@ -115,14 +115,14 @@ pub trait Membership: Clone + Debug + Send + Sync { fn da_total_nodes(&self, epoch: TYPES::Epoch) -> usize; /// Returns the threshold for a specific `Membership` implementation - fn success_threshold(&self) -> NonZeroU64; + fn success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the DA threshold for a specific `Membership` implementation - fn da_success_threshold(&self) -> NonZeroU64; + fn da_success_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the threshold for a specific `Membership` implementation - fn failure_threshold(&self) -> NonZeroU64; + fn failure_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; /// Returns the threshold required to upgrade the network protocol - fn upgrade_threshold(&self) -> NonZeroU64; + fn upgrade_threshold(&self, epoch: TYPES::Epoch) -> NonZeroU64; } diff --git a/crates/types/src/traits/metrics.rs b/crates/types/src/traits/metrics.rs index 38d9a8d6ea..cd29c75da5 100644 --- a/crates/types/src/traits/metrics.rs +++ b/crates/types/src/traits/metrics.rs @@ -212,13 +212,14 @@ pub trait Counter: Send + Sync + Debug + DynClone { /// Add a value to the counter fn add(&self, amount: usize); } + /// A gauge that stores the latest value. pub trait Gauge: Send + Sync + Debug + DynClone { /// Set the gauge value fn set(&self, amount: usize); /// Update the gauge value - fn update(&self, delts: i64); + fn update(&self, delta: i64); } /// A histogram which will record a series of points. 
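Editor's note: the `Membership` threshold getters above (`success_threshold`, `da_success_threshold`, `failure_threshold`, `upgrade_threshold`) now take an epoch, matching the epoch parameter threaded through `Certificate::threshold` earlier in this patch, so committees whose size changes between epochs can report per-epoch quorum sizes. Below is a minimal sketch of what an epoch-aware implementation might look like; `StaticCommittee`, `nodes_per_epoch`, and the 2n/3 + 1 and n/3 + 1 formulas are illustrative assumptions, not the actual HotShot membership code.

use std::num::NonZeroU64;

struct StaticCommittee {
    // Number of voting nodes in each epoch; the index is the epoch number (assumed layout).
    nodes_per_epoch: Vec<u64>,
}

impl StaticCommittee {
    fn total_nodes(&self, epoch: u64) -> u64 {
        *self.nodes_per_epoch.get(epoch as usize).unwrap_or(&0)
    }

    // Quorum ("success") threshold: strictly more than two thirds of the nodes.
    fn success_threshold(&self, epoch: u64) -> NonZeroU64 {
        NonZeroU64::new(self.total_nodes(epoch) * 2 / 3 + 1).unwrap()
    }

    // "One honest" ("failure") threshold: strictly more than one third of the nodes.
    fn failure_threshold(&self, epoch: u64) -> NonZeroU64 {
        NonZeroU64::new(self.total_nodes(epoch) / 3 + 1).unwrap()
    }
}

fn main() {
    let committee = StaticCommittee { nodes_per_epoch: vec![10, 13] };
    // Epoch 1 has 13 nodes: quorum needs 9 votes, one-honest needs 5.
    assert_eq!(committee.success_threshold(1).get(), 9);
    assert_eq!(committee.failure_threshold(1).get(), 5);
    // Epoch 0 has 10 nodes: quorum needs 7 votes, one-honest needs 4.
    assert_eq!(committee.success_threshold(0).get(), 7);
    assert_eq!(committee.failure_threshold(0).get(), 4);
}

The same epoch value is what `Certificate::threshold(membership, epoch)` receives during vote accumulation in `vote.rs` below, so the quorum size used to assemble a certificate always matches the epoch the votes were cast in.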
diff --git a/crates/types/src/traits/storage.rs b/crates/types/src/traits/storage.rs index 7782ef0101..a0e226cdbd 100644 --- a/crates/types/src/traits/storage.rs +++ b/crates/types/src/traits/storage.rs @@ -18,7 +18,9 @@ use jf_vid::VidScheme; use super::node_implementation::NodeType; use crate::{ consensus::{CommitmentMap, View}, - data::{DaProposal, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare}, + data::{ + DaProposal, DaProposal2, Leaf, Leaf2, QuorumProposal, QuorumProposal2, VidDisperseShare, + }, event::HotShotAction, message::Proposal, simple_certificate::{QuorumCertificate, QuorumCertificate2, UpgradeCertificate}, @@ -36,6 +38,12 @@ pub trait Storage: Send + Sync + Clone { proposal: &Proposal>, vid_commit: ::Commit, ) -> Result<()>; + /// Add a proposal to the stored DA proposals. + async fn append_da2( + &self, + proposal: &Proposal>, + vid_commit: ::Commit, + ) -> Result<()>; /// Add a proposal we sent to the store async fn append_proposal( &self, diff --git a/crates/types/src/vote.rs b/crates/types/src/vote.rs index bdff9d4bb5..13112afa12 100644 --- a/crates/types/src/vote.rs +++ b/crates/types/src/vote.rs @@ -81,7 +81,10 @@ pub trait Certificate: HasViewNumber { ) -> impl std::future::Future; /// Returns the amount of stake needed to create this certificate // TODO: Make this a static ratio of the total stake of `Membership` - fn threshold>(membership: &MEMBERSHIP) -> u64; + fn threshold>( + membership: &MEMBERSHIP, + epoch: ::Epoch, + ) -> u64; /// Get Stake Table from Membership implementation. fn stake_table>( @@ -220,12 +223,12 @@ impl< *total_stake_casted += stake_table_entry.stake(); total_vote_map.insert(key, (vote.signature(), vote_commitment)); - if *total_stake_casted >= CERT::threshold(membership).into() { + if *total_stake_casted >= CERT::threshold(membership, epoch).into() { // Assemble QC let real_qc_pp: <::SignatureKey as SignatureKey>::QcParams = ::public_parameter( stake_table, - U256::from(CERT::threshold(membership)), + U256::from(CERT::threshold(membership, epoch)), ); let real_qc_sig = ::assemble( diff --git a/scripts/benchmark_scripts/aws_ecs_benchmarks_cdn_gpu.sh b/scripts/benchmark_scripts/aws_ecs_benchmarks_cdn_gpu.sh index 9e486f9cea..ad8bdb8b87 100755 --- a/scripts/benchmark_scripts/aws_ecs_benchmarks_cdn_gpu.sh +++ b/scripts/benchmark_scripts/aws_ecs_benchmarks_cdn_gpu.sh @@ -165,4 +165,4 @@ done # shut down all related threads echo -e "\e[35mGoing to stop cdn-marshal\e[0m" killall -9 cdn-marshal -# for pid in $(ps -ef | grep "keydb-server" | awk '{print $2}'); do sudo kill -9 $pid; done \ No newline at end of file +# for pid in $(ps -ef | grep "keydb-server" | awk '{print $2}'); do sudo kill -9 $pid; done
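Editor's note: the new `*Data2` vote and certificate payloads introduced in this patch add an `epoch` field but deliberately commit to the same bytes as the legacy types whenever the epoch is zero (see the `if **epoch == 0` branches in `simple_vote.rs`). That rule is what lets the `to_*2` / `to_*` converters round-trip certificates and votes without invalidating signatures made over the old commitments. The following self-contained sketch illustrates the rule; std's `DefaultHasher` stands in for the `committable` builder, and the `TimeoutData` / `TimeoutData2` pair here is a simplified stand-in for the real generic structs.

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

struct TimeoutData {
    view: u64,
}

struct TimeoutData2 {
    view: u64,
    epoch: u64,
}

fn commit_v1(data: &TimeoutData) -> u64 {
    let mut h = DefaultHasher::new();
    "Timeout data".hash(&mut h);
    data.view.hash(&mut h);
    h.finish()
}

fn commit_v2(data: &TimeoutData2) -> u64 {
    let mut h = DefaultHasher::new();
    "Timeout data".hash(&mut h);
    data.view.hash(&mut h);
    // Epoch 0 keeps the legacy commitment so pre-upgrade signatures still verify;
    // a non-zero epoch is folded into the hash and changes the commitment.
    if data.epoch != 0 {
        data.epoch.hash(&mut h);
    }
    h.finish()
}

// Mirrors the `to_vote2` / `to_dac2`-style converters: legacy data is upgraded by
// pinning the epoch to 0.
fn to_v2(data: TimeoutData) -> TimeoutData2 {
    TimeoutData2 { view: data.view, epoch: 0 }
}

fn main() {
    let old = TimeoutData { view: 7 };
    let old_commit = commit_v1(&old);
    let new = to_v2(old);
    // Round-tripping through the version-2 type leaves the commitment unchanged.
    assert_eq!(old_commit, commit_v2(&new));
    // Once a non-zero epoch is set, the commitment diverges as intended.
    assert_ne!(old_commit, commit_v2(&TimeoutData2 { view: 7, epoch: 3 }));
}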