From 34726b91f50d13714be7667e7c929e6440d03dd7 Mon Sep 17 00:00:00 2001 From: Javier Sagredo Date: Thu, 19 Sep 2024 17:44:33 +0200 Subject: [PATCH] UTXO-HD --- .github/workflows/ci.yml | 5 + CONTRIBUTING.md | 35 + .../report/chapters/storage/ledgerdb.tex | 92 +- .../contents/about-ouroboros/utxo-hd.md | 33 + .../for-developers/utxo-hd/Overview.md | 3 + .../utxo-hd/future-ledger-hd.md | 258 + .../for-developers/utxo-hd/utxo-hd.md | 302 ++ docs/website/sidebars.js | 9 + .../img/utxo-hd/utxo-hd-replay-01-19-23.png | Bin 0 -> 75262 bytes .../img/utxo-hd/utxo-hd-sync-01-19-23.png | Bin 0 -> 68662 bytes .../app/DBAnalyser/Parsers.hs | 16 +- .../app/db-synthesizer.hs | 2 +- .../app/snapshot-converter.hs | 259 + .../golden/byron/disk/LedgerTables | 1 + .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../Query_Allegra_GetBigLedgerPeerSnapshot | 2 +- .../Query_Alonzo_GetBigLedgerPeerSnapshot | 2 +- .../Query_Mary_GetBigLedgerPeerSnapshot | 2 +- .../Query_Shelley_GetBigLedgerPeerSnapshot | 2 +- .../golden/cardano/disk/LedgerTables_Allegra | Bin 0 -> 101 bytes .../golden/cardano/disk/LedgerTables_Alonzo | Bin 0 -> 183 bytes .../golden/cardano/disk/LedgerTables_Babbage | Bin 0 -> 174 bytes .../golden/cardano/disk/LedgerTables_Byron | 1 + .../golden/cardano/disk/LedgerTables_Conway | Bin 0 -> 174 bytes .../golden/cardano/disk/LedgerTables_Mary | Bin 0 -> 149 bytes .../golden/cardano/disk/LedgerTables_Shelley | Bin 0 -> 105 bytes .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../Query_GetBigLedgerPeerSnapshot | 2 +- .../golden/shelley/disk/LedgerTables | Bin 0 -> 103 bytes .../ouroboros-consensus-cardano.cabal | 39 +- .../Ouroboros/Consensus/Byron/Ledger/Forge.hs | 4 +- .../Consensus/Byron/Ledger/Inspect.hs | 2 +- .../Consensus/Byron/Ledger/Ledger.hs | 84 +- .../Consensus/Byron/Ledger/Mempool.hs | 7 +- 
.../Consensus/Byron/Node/Serialisation.hs | 15 +- .../Ouroboros/Consensus/Cardano/Block.hs | 90 +- .../Ouroboros/Consensus/Cardano/ByronHFC.hs | 51 + .../Consensus/Cardano/CanHardFork.hs | 248 +- .../Ouroboros/Consensus/Cardano/Ledger.hs | 209 + .../Ouroboros/Consensus/Cardano/Node.hs | 32 +- .../Ouroboros/Consensus/Cardano/QueryHF.hs | 142 + .../Ouroboros/Consensus/Shelley/Eras.hs | 3 + .../Consensus/Shelley/Ledger/Forge.hs | 10 +- .../Consensus/Shelley/Ledger/Inspect.hs | 4 +- .../Consensus/Shelley/Ledger/Ledger.hs | 258 +- .../Consensus/Shelley/Ledger/Mempool.hs | 67 +- .../Consensus/Shelley/Ledger/Query.hs | 513 +- .../Shelley/Ledger/SupportsProtocol.hs | 6 +- .../Consensus/Shelley/Node/Serialisation.hs | 21 +- .../Consensus/Shelley/Node/TPraos.hs | 12 +- .../Ouroboros/Consensus/Shelley/ShelleyHFC.hs | 99 +- .../Ouroboros/Consensus/ByronDual/Ledger.hs | 2 +- .../Ouroboros/Consensus/ByronDual/Node.hs | 6 +- .../Consensus/ByronDual/Node/Serialisation.hs | 17 +- .../Test/Consensus/Byron/Examples.hs | 30 +- .../Test/Consensus/Byron/Generators.hs | 53 +- .../ThreadNet/Infra/Byron/TrackUpdates.hs | 4 +- .../Consensus/ByronSpec/Ledger/Forge.hs | 2 +- .../Consensus/ByronSpec/Ledger/Ledger.hs | 47 +- .../Consensus/ByronSpec/Ledger/Mempool.hs | 13 +- .../Test/Consensus/Cardano/Examples.hs | 75 +- .../Test/Consensus/Cardano/Generators.hs | 34 +- .../ThreadNet/Infra/ShelleyBasedHardFork.hs | 119 +- .../Test/ThreadNet/TxGen/Cardano.hs | 32 +- .../Cardano/Api/Protocol/Types.hs | 1 + .../Cardano/Tools/DBAnalyser/Analysis.hs | 301 +- .../Cardano/Tools/DBAnalyser/Block/Cardano.hs | 27 +- .../Cardano/Tools/DBAnalyser/HasAnalysis.hs | 4 +- .../Cardano/Tools/DBAnalyser/Run.hs | 121 +- .../Cardano/Tools/DBAnalyser/Types.hs | 3 + .../Cardano/Tools/DBSynthesizer/Forging.hs | 26 +- .../Cardano/Tools/DBSynthesizer/Run.hs | 14 +- .../Cardano/Tools/DBTruncater/Run.hs | 3 +- .../Test/Consensus/Shelley/Examples.hs | 73 +- .../Test/Consensus/Shelley/Generators.hs | 41 +- .../Test/ThreadNet/TxGen/Shelley.hs | 19 +- .../test/byron-test/Main.hs | 2 + .../Test/Consensus/Byron/LedgerTables.hs | 15 + .../test/byron-test/Test/ThreadNet/Byron.hs | 4 +- .../byron-test/Test/ThreadNet/DualByron.hs | 14 +- .../test/cardano-test/Main.hs | 4 +- .../Consensus/Cardano/ByronCompatibility.hs | 82 +- .../Test/Consensus/Cardano/Translation.hs | 396 ++ .../cardano-test/Test/ThreadNet/Cardano.hs | 10 +- .../test/shelley-test/Main.hs | 2 + .../Test/Consensus/Shelley/LedgerTables.hs | 64 + .../shelley-test/Test/ThreadNet/Shelley.hs | 3 +- .../test/tools-test/Main.hs | 5 +- .../ouroboros-consensus-diffusion.cabal | 5 + .../Consensus/Network/NodeToClient.hs | 42 +- .../Ouroboros/Consensus/Node.hs | 79 +- .../Ouroboros/Consensus/Node/GSM.hs | 4 +- .../Ouroboros/Consensus/NodeKernel.hs | 380 +- .../Test/ThreadNet/General.hs | 6 +- .../Test/ThreadNet/Network.hs | 163 +- .../Test/ThreadNet/TxGen.hs | 11 +- .../Test/Consensus/Ledger/Mock/Generators.hs | 22 +- .../Test/ThreadNet/TxGen/Mock.hs | 3 +- .../Test/Consensus/HardFork/Combinator.hs | 78 +- .../Test/Consensus/HardFork/Combinator/A.hs | 88 +- .../Test/Consensus/HardFork/Combinator/B.hs | 67 +- .../IOSimQSM/Test/StateMachine/Sequential.hs | 6 +- .../Consensus/PeerSimulator/NodeLifecycle.hs | 7 +- .../Test/Consensus/PeerSimulator/Run.hs | 14 +- .../test/mock-test/Main.hs | 9 +- .../Consensus/Ledger/Mock/LedgerTables.hs | 24 + .../bench/ChainSync-client-bench/Main.hs | 7 +- .../backingstore-bench/Bench/Commands.hs | 220 + .../bench/backingstore-bench/Main.hs | 247 + 
.../mempool-bench/Bench/Consensus/Mempool.hs | 1 + .../Bench/Consensus/Mempool/TestBlock.hs | 122 +- .../bench/mempool-bench/Main.hs | 1 - ouroboros-consensus/docs/haddocks/bogus.svg | 4 + ouroboros-consensus/ouroboros-consensus.cabal | 104 +- .../Ouroboros/Consensus/Block/Forging.hs | 10 +- .../BlockchainTime/WallClock/HardFork.hs | 4 +- .../Ouroboros/Consensus/Forecast.hs | 2 +- .../Ouroboros/Consensus/Fragment/Validated.hs | 66 +- .../Consensus/Fragment/ValidatedDiff.hs | 55 +- .../Ouroboros/Consensus/Genesis/Governor.hs | 3 +- .../Ouroboros/Consensus/HardFork/Abstract.hs | 4 +- .../Combinator/Abstract/SingleEraBlock.hs | 9 +- .../Consensus/HardFork/Combinator/Basics.hs | 14 +- .../Consensus/HardFork/Combinator/Compat.hs | 39 +- .../HardFork/Combinator/Degenerate.hs | 14 +- .../HardFork/Combinator/Embed/Binary.hs | 3 +- .../HardFork/Combinator/Embed/Nary.hs | 58 +- .../HardFork/Combinator/Embed/Unary.hs | 54 +- .../Consensus/HardFork/Combinator/Forging.hs | 9 +- .../HardFork/Combinator/InjectTxs.hs | 106 +- .../Consensus/HardFork/Combinator/Ledger.hs | 584 ++- .../Combinator/Ledger/CommonProtocolParams.hs | 19 +- .../Combinator/Ledger/PeerSelection.hs | 3 +- .../HardFork/Combinator/Ledger/Query.hs | 295 +- .../Consensus/HardFork/Combinator/Mempool.hs | 169 +- .../Consensus/HardFork/Combinator/Node.hs | 9 +- .../HardFork/Combinator/Node/InitStorage.hs | 6 +- .../Combinator/Serialisation/Common.hs | 44 +- .../Combinator/Serialisation/SerialiseDisk.hs | 11 +- .../Serialisation/SerialiseNodeToClient.hs | 67 +- .../Consensus/HardFork/Combinator/State.hs | 137 +- .../HardFork/Combinator/State/Types.hs | 95 +- .../HardFork/Combinator/Translation.hs | 15 +- .../Ouroboros/Consensus/HeaderStateHistory.hs | 7 +- .../Ouroboros/Consensus/Ledger/Abstract.hs | 67 +- .../Ouroboros/Consensus/Ledger/Basics.hs | 107 +- .../Consensus/Ledger/CommonProtocolParams.hs | 4 +- .../Ouroboros/Consensus/Ledger/Dual.hs | 206 +- .../Ouroboros/Consensus/Ledger/Extended.hs | 183 +- .../Ouroboros/Consensus/Ledger/Inspect.hs | 8 +- .../Ouroboros/Consensus/Ledger/Query.hs | 339 +- .../Consensus/Ledger/SupportsMempool.hs | 86 +- .../Consensus/Ledger/SupportsPeerSelection.hs | 2 +- .../Consensus/Ledger/SupportsProtocol.hs | 9 +- .../Ouroboros/Consensus/Ledger/Tables.hs | 388 ++ .../Consensus/Ledger/Tables/Basics.hs | 104 + .../Consensus/Ledger/Tables/Combinators.hs | 277 ++ .../Ouroboros/Consensus/Ledger/Tables/Diff.hs | 227 + .../Consensus/Ledger/Tables/DiffSeq.hs | 369 ++ .../Consensus/Ledger/Tables/MapKind.hs | 205 + .../Consensus/Ledger/Tables/Utils.hs | 327 ++ .../Ouroboros/Consensus/Mempool.hs | 11 +- .../Ouroboros/Consensus/Mempool/API.hs | 30 +- .../Ouroboros/Consensus/Mempool/Capacity.hs | 17 +- .../Consensus/Mempool/Impl/Common.hs | 366 +- .../Ouroboros/Consensus/Mempool/Init.hs | 23 +- .../Ouroboros/Consensus/Mempool/Query.hs | 84 +- .../Ouroboros/Consensus/Mempool/Update.hs | 428 +- .../BlockFetch/ClientInterface.hs | 3 +- .../MiniProtocol/ChainSync/Client.hs | 15 +- .../ChainSync/Client/InFutureCheck.hs | 5 +- .../MiniProtocol/LocalStateQuery/Server.hs | 75 +- .../Ouroboros/Consensus/Node/ProtocolInfo.hs | 3 +- .../Ouroboros/Consensus/Node/Run.hs | 67 +- .../Ouroboros/Consensus/Node/Serialisation.hs | 26 +- .../Ouroboros/Consensus/Storage/ChainDB.hs | 36 +- .../Consensus/Storage/ChainDB/API.hs | 88 +- .../Consensus/Storage/ChainDB/Impl.hs | 110 +- .../Consensus/Storage/ChainDB/Impl/Args.hs | 31 +- .../Storage/ChainDB/Impl/Background.hs | 86 +- .../Storage/ChainDB/Impl/ChainSel.hs | 346 +- 
.../Consensus/Storage/ChainDB/Impl/LgrDB.hs | 397 -- .../Consensus/Storage/ChainDB/Impl/Query.hs | 98 +- .../Consensus/Storage/ChainDB/Impl/Types.hs | 51 +- .../Consensus/Storage/ChainDB/Init.hs | 6 +- .../Ouroboros/Consensus/Storage/Common.hs | 1 - .../Consensus/Storage/ImmutableDB/Impl.hs | 5 + .../Storage/ImmutableDB/Impl/Stream.hs | 116 + .../Ouroboros/Consensus/Storage/LedgerDB.hs | 256 +- .../Consensus/Storage/LedgerDB/API.hs | 572 +++ .../Consensus/Storage/LedgerDB/API/Config.hs | 35 + .../Consensus/Storage/LedgerDB/DiskPolicy.hs | 148 - .../Consensus/Storage/LedgerDB/Impl/Args.hs | 76 + .../Consensus/Storage/LedgerDB/Impl/Common.hs | 133 + .../Consensus/Storage/LedgerDB/Impl/Init.hs | 308 ++ .../Storage/LedgerDB/Impl/Snapshots.hs | 406 ++ .../Storage/LedgerDB/Impl/Validate.hs | 301 ++ .../Consensus/Storage/LedgerDB/Init.hs | 277 -- .../Consensus/Storage/LedgerDB/LedgerDB.hs | 134 - .../Consensus/Storage/LedgerDB/Query.hs | 81 - .../Consensus/Storage/LedgerDB/Snapshots.hs | 297 -- .../Consensus/Storage/LedgerDB/Update.hs | 386 -- .../Consensus/Storage/LedgerDB/V1/Args.hs | 100 + .../Storage/LedgerDB/V1/BackingStore.hs | 121 + .../Storage/LedgerDB/V1/BackingStore/API.hs | 284 ++ .../LedgerDB/V1/BackingStore/Impl/InMemory.hs | 307 ++ .../LedgerDB/V1/BackingStore/Impl/LMDB.hs | 716 +++ .../V1/BackingStore/Impl/LMDB/Bridge.hs | 179 + .../V1/BackingStore/Impl/LMDB/Status.hs | 107 + .../Consensus/Storage/LedgerDB/V1/Common.hs | 256 + .../Storage/LedgerDB/V1/DbChangelog.hs | 1017 ++++ .../Consensus/Storage/LedgerDB/V1/Flush.hs | 37 + .../Consensus/Storage/LedgerDB/V1/Forker.hs | 480 ++ .../Consensus/Storage/LedgerDB/V1/Init.hs | 385 ++ .../Consensus/Storage/LedgerDB/V1/Lock.hs | 86 + .../Storage/LedgerDB/V1/Snapshots.hs | 255 + .../Consensus/Storage/LedgerDB/V2/Args.hs | 32 + .../Consensus/Storage/LedgerDB/V2/Common.hs | 534 ++ .../Consensus/Storage/LedgerDB/V2/InMemory.hs | 202 + .../Consensus/Storage/LedgerDB/V2/Init.hs | 377 ++ .../Consensus/Storage/LedgerDB/V2/LSM.hs | 53 + .../Storage/LedgerDB/V2/LedgerSeq.hs | 485 ++ .../Consensus/Storage/VolatileDB/Impl.hs | 5 + .../Ouroboros/Consensus/Ticked.hs | 19 +- .../Ouroboros/Consensus/TypeFamilyWrappers.hs | 15 + .../Ouroboros/Consensus/Util.hs | 56 + .../Ouroboros/Consensus/Util/Args.hs | 2 +- .../Ouroboros/Consensus/Util/DepPair.hs | 11 +- .../Ouroboros/Consensus/Util/EarlyExit.hs | 2 +- .../Ouroboros/Consensus/Util/IOLike.hs | 4 + .../Test/LedgerTables.hs | 57 + .../Test/Util/ChainDB.hs | 33 +- .../Test/Util/ChainUpdates.hs | 15 +- .../Test/Util/LedgerStateOnlyTables.hs | 79 + .../Test/Util/Orphans/Arbitrary.hs | 35 +- .../Test/Util/Orphans/IOLike.hs | 4 + .../Test/Util/Orphans/ToExpr.hs | 43 +- .../Test/Util/QuickCheck.hs | 26 +- .../Test/Util/Serialisation/Examples.hs | 16 +- .../Test/Util/Serialisation/Golden.hs | 20 +- .../Test/Util/Serialisation/Roundtrip.hs | 51 +- .../Test/Util/Serialisation/SomeResult.hs | 2 +- .../Test/Util/TestBlock.hs | 155 +- .../Test/Consensus/Mempool/Mocked.hs | 25 +- .../Ouroboros/Consensus/Mock/Ledger/Block.hs | 181 +- .../Ouroboros/Consensus/Mock/Ledger/Forge.hs | 4 +- .../Consensus/Mock/Node/Serialisation.hs | 19 +- .../Ouroboros/Consensus/Tutorial/Simple.lhs | 72 +- .../Consensus/Tutorial/WithEpoch.lhs | 51 +- .../test/consensus-test/Main.hs | 12 +- .../Test/Consensus/BlockchainTime/Simple.hs | 4 + .../Test/Consensus/HardFork/Forecast.hs | 16 +- .../Test/Consensus/HardFork/History.hs | 5 +- .../Test/Consensus/Ledger/Tables/Diff.hs | 120 + .../Test/Consensus/Ledger/Tables/DiffSeq.hs | 97 + 
.../consensus-test/Test/Consensus/Mempool.hs | 352 +- .../Test/Consensus/Mempool/Fairness.hs | 12 +- .../Consensus/Mempool/Fairness/TestBlock.hs | 48 +- .../Test/Consensus/Mempool/StateMachine.hs | 943 ++++ .../Test/Consensus/Mempool/Util.hs | 239 + .../MiniProtocol/BlockFetch/Client.hs | 7 +- .../MiniProtocol/ChainSync/Client.hs | 10 +- .../MiniProtocol/LocalStateQuery/Server.hs | 141 +- ouroboros-consensus/test/storage-test/Main.hs | 10 +- .../Storage/ChainDB/FollowerPromptness.hs | 15 +- .../Test/Ouroboros/Storage/ChainDB/Model.hs | 175 +- .../Ouroboros/Storage/ChainDB/Model/Test.hs | 3 +- .../Ouroboros/Storage/ChainDB/StateMachine.hs | 110 +- .../Test/Ouroboros/Storage/ChainDB/Unit.hs | 16 +- .../Test/Ouroboros/Storage/LedgerDB.hs | 22 +- .../Storage/LedgerDB/OrphanArbitrary.hs | 14 - .../Storage/LedgerDB/Serialisation.hs | 72 + .../{DiskPolicy.hs => SnapshotPolicy.hs} | 50 +- .../Storage/LedgerDB/StateMachine.hs | 537 ++ .../LedgerDB/StateMachine/TestBlock.hs | 351 ++ .../Storage/LedgerDB/V1/BackingStore.hs | 345 ++ .../LedgerDB/V1/BackingStore/Lockstep.hs | 811 +++ .../Storage/LedgerDB/V1/BackingStore/Mock.hs | 338 ++ .../LedgerDB/V1/BackingStore/Registry.hs | 62 + .../DbChangelog/QuickCheck.hs} | 132 +- .../Storage/LedgerDB/V1/DbChangelog/Unit.hs | 339 ++ .../Test/Ouroboros/Storage/TestBlock.hs | 41 +- scripts/ci/run-stylish.sh | 22 +- scripts/docs/modules-consensus.svg | 4391 +++++++++-------- sop-extras/src/Data/SOP/Functors.hs | 4 - 321 files changed, 26406 insertions(+), 7628 deletions(-) create mode 100644 docs/website/contents/about-ouroboros/utxo-hd.md create mode 100644 docs/website/contents/for-developers/utxo-hd/Overview.md create mode 100644 docs/website/contents/for-developers/utxo-hd/future-ledger-hd.md create mode 100644 docs/website/contents/for-developers/utxo-hd/utxo-hd.md create mode 100644 docs/website/static/img/utxo-hd/utxo-hd-replay-01-19-23.png create mode 100644 docs/website/static/img/utxo-hd/utxo-hd-sync-01-19-23.png create mode 100644 ouroboros-consensus-cardano/app/snapshot-converter.hs create mode 100644 ouroboros-consensus-cardano/golden/byron/disk/LedgerTables create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Allegra create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Alonzo create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Babbage create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Byron create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Conway create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Mary create mode 100644 ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Shelley create mode 100644 ouroboros-consensus-cardano/golden/shelley/disk/LedgerTables create mode 100644 ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Ledger.hs create mode 100644 ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/QueryHF.hs create mode 100644 ouroboros-consensus-cardano/test/byron-test/Test/Consensus/Byron/LedgerTables.hs create mode 100644 ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/Translation.hs create mode 100644 ouroboros-consensus-cardano/test/shelley-test/Test/Consensus/Shelley/LedgerTables.hs create mode 100644 ouroboros-consensus-diffusion/test/mock-test/Test/Consensus/Ledger/Mock/LedgerTables.hs create mode 100644 ouroboros-consensus/bench/backingstore-bench/Bench/Commands.hs 
create mode 100644 ouroboros-consensus/bench/backingstore-bench/Main.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Basics.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Combinators.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Diff.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/DiffSeq.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/MapKind.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Utils.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/LgrDB.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl/Stream.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API/Config.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/DiskPolicy.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Args.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Common.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Init.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Snapshots.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Validate.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Init.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/LedgerDB.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Query.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Snapshots.hs delete mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Update.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Args.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/API.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/InMemory.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Bridge.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Status.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Common.hs create mode 100644 
ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/DbChangelog.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Flush.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Forker.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Lock.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Snapshots.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Args.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Common.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/InMemory.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LSM.hs create mode 100644 ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LedgerSeq.hs create mode 100644 ouroboros-consensus/src/unstable-consensus-testlib/Test/LedgerTables.hs create mode 100644 ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/LedgerStateOnlyTables.hs create mode 100644 ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/Diff.hs create mode 100644 ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/DiffSeq.hs create mode 100644 ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/StateMachine.hs create mode 100644 ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Util.hs delete mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/OrphanArbitrary.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/Serialisation.hs rename ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/{DiskPolicy.hs => SnapshotPolicy.hs} (87%) create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine/TestBlock.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Lockstep.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Mock.hs create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Registry.hs rename ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/{InMemory.hs => V1/DbChangelog/QuickCheck.hs} (71%) create mode 100644 ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/Unit.hs diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 6c9e98b130..b83255c4f2 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -76,6 +76,11 @@ jobs: cabal clean cabal update + - name: Install lmdb + run: | + sudo apt update + sudo apt install liblmdb-dev + # We create a `dependencies.txt` file that can be used to index the cabal # store cache. 
# diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 46efafa548..6b25382c1a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -132,6 +132,41 @@ cabal test ouroboros-consensus:test:consensus-test --test-show-details=direct Note the second one cannot be used when we want to provide CLI arguments to the test-suite. +# Generating documentation and setting up hoogle + +The documentation contains some [tikz](https://tikz.net) figures that require +some preprocessing for them to be displayed. To do this, use the documentation +script: + +```bash +./scripts/docs/haddocks.sh +``` + +If not already in your `PATH` (eg when in a Nix shell), this will install +[`cabal-docspec`](https://github.com/phadej/cabal-extras/tree/master/cabal-docspec) +from a binary, and then build the haddocks for the project. + +Often times, it is useful to have a +[`hoogle`](https://github.com/ndmitchell/hoogle) server at hand, with the +packages and its dependencies. Our suggestion is to install +[`cabal-hoogle`](https://github.com/kokobd/cabal-hoogle) from github: + +```bash +git clone git@github.com:kokobd/cabal-hoogle +cd cabal-hoogle +cabal install exe:cabal-hoogle +``` + +and then run `cabal-hoogle`: + +```bash +cabal-hoogle generate +cabal-hoogle run -- server --local +``` + +This will fire a `hoogle` server at https://localhost:8080/ with the local +packages and their dependencies. + # Contributing to the code The following sections contain some guidelines that should be followed when diff --git a/docs/tech-reports/report/chapters/storage/ledgerdb.tex b/docs/tech-reports/report/chapters/storage/ledgerdb.tex index 1cfe211044..65f7349899 100644 --- a/docs/tech-reports/report/chapters/storage/ledgerdb.tex +++ b/docs/tech-reports/report/chapters/storage/ledgerdb.tex @@ -1,98 +1,8 @@ \chapter{Ledger Database} \label{ledgerdb} -The Ledger DB is responsible for the following tasks: -\begin{enumerate} -\item \textbf{Maintaining the ledger state at the tip}: Maintaining the ledger - state corresponding to the current tip in memory. When we try to extend our - chain with a new block fitting onto our tip, the block must first be validated - using the right ledger state, i.e., the ledger state corresponding to the tip. - The current ledger state is needed for various other purposes. - -\item \textbf{Maintaining the past $k$ ledger states}: As discussed in - \cref{consensus:overview:k}, we might roll back up to $k$ blocks when - switching to a more preferable fork. Consider the example below: - % - \begin{center} - \begin{tikzpicture} - \draw (0, 0) -- (50pt, 0) coordinate (I); - \draw (I) -- ++(20pt, 20pt) coordinate (C1) -- ++(20pt, 0) coordinate (C2); - \draw (I) -- ++(20pt, -20pt) coordinate (F1) -- ++(20pt, 0) coordinate (F2) -- ++(20pt, 0) coordinate (F3); - \node at (I) {$\bullet$}; - \node at (C1) {$\bullet$}; - \node at (C2) {$\bullet$}; - \node at (F1) {$\bullet$}; - \node at (F2) {$\bullet$}; - \node at (F3) {$\bullet$}; - \node at (I) [above left] {$I$}; - \node at (C1) [above] {$C_1$}; - \node at (C2) [above] {$C_2$}; - \node at (F1) [below] {$F_1$}; - \node at (F2) [below] {$F_2$}; - \node at (F3) [below] {$F_3$}; - \draw (60pt, 50pt) node {$\overbrace{\hspace{60pt}}$}; - \draw (60pt, 60pt) node[fill=white] {$k$}; - \draw [dashed] (30pt, -40pt) -- (30pt, 45pt); - \end{tikzpicture} - \end{center} - % - Our current chain's tip is $C_2$, but the fork containing blocks $F_1$, $F_2$, - and $F_3$ is more preferable. 
We roll back our chain to the intersection point - of the two chains, $I$, which must be not more than $k$ blocks back from our - current tip. Next, we must validate block $F_1$ using the ledger state at - block $I$, after which we can validate $F_2$ using the resulting ledger state, - and so on. - - This means that we need access to all ledger states of the past $k$ blocks, - i.e., the ledger states corresponding to the volatile part of the current - chain.\footnote{Applying a block to a ledger state is not an invertible - operation, so it is not possible to simply ``unapply'' $C_1$ and $C_2$ to - obtain $I$.} - - Access to the last $k$ ledger states is not only needed for validating candidate - chains, but also by the: - \begin{itemize} - \item \textbf{Local state query server}: To query any of the past $k$ ledger - states (\cref{servers:lsq}). - \item \textbf{Chain sync client}: To validate headers of a chain that - intersects with any of the past $k$ blocks - (\cref{chainsyncclient:validation}). - \end{itemize} - -\item \textbf{Storing on disk}: To obtain a ledger state for the current tip of - the chain, one has to apply \emph{all blocks in the chain} one-by-one to the - initial ledger state. When starting up the system with an on-disk chain - containing millions of blocks, all of them would have to be read from disk and - applied. This process can take tens of minutes, depending on the storage and - CPU speed, and is thus too costly to perform on each startup. - - For this reason, a recent snapshot of the ledger state should be periodically - written to disk. Upon the next startup, that snapshot can be read and used to - restore the current ledger state, as well as the past $k$ ledger states. -\end{enumerate} - -Note that whenever we say ``ledger state'', we mean the -\lstinline!ExtLedgerState blk! type described in \cref{storage:extledgerstate}. - -The above duties are divided across the following modules: - -\begin{itemize} -\item \lstinline!LedgerDB.InMemory!: this module defines a pure data structure, - named \lstinline!LedgerDB!, to represent the last $k$ ledger states in memory. - Operations to validate and append blocks, to switch to forks, to look up - ledger states, \ldots{} are provided. -\item \lstinline!LedgerDB.OnDisk!: this module contains the functionality to - write a snapshot of the \lstinline!LedgerDB! to disk and how to restore a - \lstinline!LedgerDB! from a snapshot. -\item \lstinline!LedgerDB.DiskPolicy!: this module contains the policy that - determines when a snapshot of the \lstinline!LedgerDB! is written to disk. -\item \lstinline!ChainDB.Impl.LgrDB!: this module is part of the Chain DB, and - is responsible for maintaining the pure \lstinline!LedgerDB! in a - \lstinline!StrictTVar!. -\end{itemize} - -We will now discuss the modules listed above. +THIS PART WAS PORTED TO THE HADDOCKS \section{In-memory representation} \label{ledgerdb:in-memory} diff --git a/docs/website/contents/about-ouroboros/utxo-hd.md b/docs/website/contents/about-ouroboros/utxo-hd.md new file mode 100644 index 0000000000..387037443b --- /dev/null +++ b/docs/website/contents/about-ouroboros/utxo-hd.md @@ -0,0 +1,33 @@ +# UTxO HD + +This document describes the design followed to move the ledger state +from memory to disk. 
+ +## Expected performance + +On a 64G machine, with an AMD Ryzen 9 5900X processor, we obtained the +following results when replaying and syncing from scratch up to slot 75M: + + +| | Replay max mem | Replay time | Sync max mem | Sync time | +|------------------|----------------|-------------|--------------|-----------| +| Baseline | 13 GB | 1:51 h | 15 GB | 20:46 h | +| UTxO HD (in-mem) | 13 GB | 2:50 h | 16 GB | 25:04 h | +| UTxO HD (LMDB) | 8 GB | 3:15 h | 11.4 GB | 25:50 h | + +It is worth noting that these are single measurements, and they are only +intended to provide an indication of the expected performance. + +These results were obtained around 18 January 2023. + +The plots below show how replaying and syncing a node from scratch progress +over time, and how the memory usage evolves. + +![replay times](/img/utxo-hd/utxo-hd-replay-01-19-23.png) + +![sync times](/img/utxo-hd/utxo-hd-sync-01-19-23.png) + +## References + +* [Storing the Cardano ledger state on disk: analysis and design options (An IOHK technical report)](/pdfs/utxo-db.pdf) +* [Storing the Cardano ledger state on disk: API design concepts (An IOHK technical report)](/pdfs/utxo-db-api.pdf) \ No newline at end of file diff --git a/docs/website/contents/for-developers/utxo-hd/Overview.md b/docs/website/contents/for-developers/utxo-hd/Overview.md new file mode 100644 index 0000000000..ed7070fb1e --- /dev/null +++ b/docs/website/contents/for-developers/utxo-hd/Overview.md @@ -0,0 +1,3 @@ +# Overview + +TODO \ No newline at end of file diff --git a/docs/website/contents/for-developers/utxo-hd/future-ledger-hd.md b/docs/website/contents/for-developers/utxo-hd/future-ledger-hd.md new file mode 100644 index 0000000000..4ce8a4e8f1 --- /dev/null +++ b/docs/website/contents/for-developers/utxo-hd/future-ledger-hd.md @@ -0,0 +1,258 @@ +# Ledger-HD sketch (UTxO-HD v2) + +This document describes the result of the discussion between the Ledger and +Consensus teams on 2022-01-17 about the future steps for UTxO-HD. That name +would no longer be accurate in future versions, so we propose Ledger-HD as a +replacement. + +Below, we outline the tables that are expected to be moved to the disk, their +dynamics, the main computations that will leave the Ledger and some open +questions. + +This is meant to be just a sketch; details are not worked out yet.
+ +## Scope of Ledger-HD + +The plan for Ledger-HD is to move the following tables to the disk (we will show +the "lenses" that reach each data structure from the new epoch state): + +- The unified map of `rewards`, `delegations`, `pointers` and `deposits` + +```haskell +( ne :: NewEpochState era ) + & ( nesEs :: NewEpochState era -> EpochState era ) + & ( esLState :: EpochState era -> LedgerState era ) + & ( lsDPState :: LedgerState era -> DPState (EraCrypto era) ) + & ( dpsDState :: DPState (EraCrypto era) -> DState (EraCrypto era) ) + & ( dsUnified :: DState (EraCrypto era) -> UMap (EraCrypto era) ) + +data UMap c = UMap !(Map (Credential 'Staking c) (Trip c)) !(Map Ptr (Credential 'Staking c)) +``` + +- The current stake distribution per stake credential + +```haskell +( ne :: NewEpochState era ) + & ( nesEs :: NewEpochState era -> EpochState era ) + & ( esLState :: EpochState era -> LedgerState era ) + & ( lsUTxOState :: LedgerState era -> UTxOState era ) + & ( utxosStakeDistr :: UTxOState era -> IncrementalStake (EraCrypto era) ) + +data IncrementalStake c = IStake + { credMap :: !(Map (Credential 'Staking c) Coin) + , ptrMap :: !(Map Ptr Coin) + } +``` + +- The stake snapshots + +```haskell +( ne :: NewEpochState era ) + & ( nesEs :: NewEpochState era -> EpochState era ) + & ( esSnapshots :: EpochState era -> SnapShots (EraCrypto era) ) + +data SnapShots c = SnapShots + { ssStakeMark :: SnapShot c -- Lazy on purpose + , ssStakeMarkPoolDistr :: PoolDistr c -- Lazy on purpose + , ssStakeSet :: !(SnapShot c) + , ssStakeGo :: !(SnapShot c) + , ssFee :: !Coin + } + +data SnapShot c = SnapShot + { ssStake :: !(Stake c) + , ssDelegations :: !(VMap VB VB (Credential 'Staking c) (KeyHash 'StakePool c)) + , ssPoolParams :: !(VMap VB VB (KeyHash 'StakePool c) (PoolParams c)) + } + +newtype Stake c = Stake + { unStake :: VMap VB VP (Credential 'Staking c) (CompactForm Coin) + } + +newtype PoolDistr c = PoolDistr + { unPoolDistr :: + Map (KeyHash 'StakePool c) (IndividualPoolStake c) + } + +data IndividualPoolStake c = IndividualPoolStake + { individualPoolStake :: !Rational + , individualPoolStakeVrf :: !(Hash c (VerKeyVRF c)) + } +``` + +As noted by [@JaredCorduan](https://github.com/JaredCorduan), after CIP-1694 is +complete, there will probably also be a `ssStakeMarkDRepDistr :: Map (KeyHash +'DRep c) Coin` and a `ssDRepDelegations :: VMap VB VB (Credential 'Staking c) +(KeyHash 'DRep c)`. + +- The reward update + +```haskell +( ne :: NewEpochState era ) + & ( nesRu :: NewEpochState era -> StrictMaybe (PulsingRewUpdate (EraCrypto era)) ) + +data RewardUpdate c = RewardUpdate + { deltaT :: !DeltaCoin + , deltaR :: !DeltaCoin + , rs :: !(Map (Credential 'Staking c) (Set (Reward c))) + , deltaF :: !DeltaCoin + , nonMyopic :: !(NonMyopic c) + } +``` + +Where the `PulsingRewUpdate` is just a mechanism in order to pulse through the +stake snapshot and in the end yield a `RewardUpdate`. + +## Dynamics of these maps + +### Unified map + +On each application of a block (i.e. the `BBODY` rule) and each tick (i.e. the +`TICK` rules) we know what entries of the unified map are needed to execute the +rule. 
Therefore it fits the current design of UTxO-HD, and therefore can follow +the _same_ pattern as we currently have for the UTxO set: + +- Before calling the ledger rule we can query for the needed entries +- We can present the ledger with the values they asked for, but restricted to + the data available in the disk + changelog +- The ledger will either provide diffs for the given values or will return + updated values that we can then diff with the provided ones +- The resulting differences can be included in the current definition of the + `DbChangelog`. + +In particular, we know that the sets required for the deltas on the unified map +are small and therefore fit the overall design. + +> **_PLAN:_** Consensus will have a new table on the `LedgerTables` that will +> represent the unified map. Perhaps even 4 tables or a table of triplets. It +> will have its own place on the `DbChangelog` too. The flow above describes the +> general strategy for calling the `BBODY` rule, i.e. `(re)applyBlockOpts` and +> for calling the `TICK` rule, i.e. `applyTickOpts`. + +### Stake distribution + +The update to this map is performed by `updateStakeDistribution`. It is known +before calling the `BBODY` rule which UTxOs are going to be deleted and the rule +execution logic itself knows the UTxOs that are going to be added. Therefore we +can follow a logic similar to the above: + +- Before calling the ledger rule we can query for the needed entries +- We can present the ledger with the values they asked for, but restricted to + the data available in the disk + changelog +- The ledger will either provide diffs for the given values or will return + updated values that we can then diff with the provided ones +- The resulting differences can be included in the current definition of the + `DbChangelog`. + +> **_PLAN:_** Consensus will have a new table on the `LedgerTables` that will +> represent the incremental stake distribution (maybe two tables, one for creds +> one for ptrs). It will have its own place on the `DbChangelog` too. The flow +> above describes the general strategy for calling the `BBODY` rule, i.e. +> `(re)applyBlockOpts`. + +### Stake snapshots + +The snapshots are rotated by the `snapTransition` rule (called by `TICK`). This +is the most complicated of the three because it involves accessing the unified +map and the `IncrementalStake` in their entirety in order to fold them. We do +these two steps: + +``` +step1 = (dom activeDelegs ◁ credStake) ∪ (dom activeDelegs ◁ ptrStake) +step2 = aggregate (dom activeDelegs ◁ rewards) step1 +``` + +Where +- `activeDelegs` comes from the delegations in the unified map, +- `rewards` come from the unified map, +- `credStake` and `ptrStake` come from the incremental stake + +However, there is an important note here. The `ssStakeMark` is only used to be +put in `ssStakeSet` on the next snapshot rotation, then `ssStakeSet` is only +used to be put in `ssStakeGo`, and `ssStakeGo` is only used to prepare the +`PulsingRewUpdate`. This is done in `PulsingReward.startStep` and it also uses +the `rewards` map of the unified map. Assuming the reward calculation is done +outside of the ledger rules, the snapshot is not really needed by the ledger. In +that case we would avoid providing the whole map to the Ledger because the +calculation would be performed outside of the ledger rules. + +The `ssStakeMarkPoolDistr` field is used to be put in `nesPd` on the +`NewEpochState` (by `NEWEPOCH`) which later will be used to provide ledger views +and calculate leader schedules. 
Note this is purely a Protocol concern and thus +probably a Consensus concern. + +However, it seems that the `ssPoolParams` are in fact modified by the Ledger. +This should not be very problematic as we would know in advance which pools are +updating their params, and we could replicate the scheme above for this map. + +## Reward computation and the ADA pots + +The way the rewards computation happens now (see `PulsingRewards` and +`Shelley.Rules.Rupd` and `Shelley.Rules.Tick`) is that on each ticking we pulse +a chunk of the rewards update so that when we reach the epoch boundary we want +to have pulsed through the whole `ssStakeGo` snapshot that was used when +creating the pulser. + +In the end, the reward computation has to produce a `RewardUpdate`: +```haskell +data RewardUpdate c = RewardUpdate + { deltaT :: !DeltaCoin + , deltaR :: !DeltaCoin + , rs :: !(Map (Credential 'Staking c) (Set (Reward c))) + , deltaF :: !DeltaCoin + , nonMyopic :: !(NonMyopic c) + } +``` + +where we will use the `delta*` fields to update the treasury, reserves and fee +pot on the `NEWEPOCH` rule. We also use the `rs` field when calling +`updateRewards`, which also happens on `NEWEPOCH`. + + +If Consensus can compute the `RewardUpdate` (possibly on a separate thread that +traverses the map at its own pace?) then we can provide the `RewardUpdate` to +the `NEWEPOCH` rule so that the pots can be updated. + +> **_PLAN:_** Consensus will compute the rewards outside of the Ledger and will +> provide the parts of the `RewardUpdate` to the ledger. In particular, on the +> epoch boundary, it will provide `deltaT`, `deltaR`, and `deltaF`. Ledger will +> not compute the `RewardUpdate`, thus the pulser becomes dead code. + +> Moreover, the ledger will provide a function `f :: Block -> Set +> (StakeCredential c)` so that Consensus can supply Ledger with only the part of +> the `UMap` (the unified map) that it requires. In particular, Consensus will +> have to apply the `rs :: !(Map (Credential 'Staking c) (Set (Reward c)))` field +> of the reward update to the unified map on the epoch boundary, prior to +> computing this view for the ledger (to ensure that reward withdrawals are +> correct within the given block). + +## Snapshots and Leader Schedule + +The `checkIsLeader` functions for Praos and TPraos make use of the stake +distribution by stake pool in the `LedgerView` (see the definitions in +`ouroboros-consensus-protocol`). If the snapshots (and therefore the +`ssStakeMarkPoolDistr` field) reside on the Consensus side, we can produce the +relevant stake distributions when needed without involving the ledger. In any +case this functionality is in between Ledger and Consensus so it makes sense to +move it out of the ledger. + +> **_PLAN:_** Consensus will manage the snapshots to produce stake distribution +> by pool that can be used by Consensus later to resolve queries about the +> LeaderSchedule. Ledger will not know about the Snapshots. In particular, the +> UTxO-HD report includes the concept of Snapshots of tables, which would be +> used to manage and access these snapshots. + +Note that this implies creating a new package or component at the +Consensus-Ledger boundary whose owner would probably be the Consensus team as +its responsibilities would be related to computations required for the +Consensus protocol (leader checks, and similar). + +## Open questions + +- Should the Ledger return the diffs? It actually computes diffs internally, but + the diffs are applied to the values before returning.
If we wanted to + return the diffs instead, there are many intermediate layers through which + they will have to be floated, but it should be doable. + +- Rewards withdrawals are known beforehand. Ledger could produce deltas that + would take effect at a future epoch boundary. diff --git a/docs/website/contents/for-developers/utxo-hd/utxo-hd.md b/docs/website/contents/for-developers/utxo-hd/utxo-hd.md new file mode 100644 index 0000000000..431239f22a --- /dev/null +++ b/docs/website/contents/for-developers/utxo-hd/utxo-hd.md @@ -0,0 +1,302 @@ +# UTXO-HD + +This document aims to provide a comprehensive guide on UTXO-HD: why we are +implementing this feature, what it brings to Cardano and what it implies. + +## Why does Cardano need UTXO-HD + +Cardano is built following the UTXO model. This means that the Ledger state +contains a map from _transaction inputs_ to _transaction outputs_. A transaction +might consume some of those entries and produce new ones. Each entry is owned by +an address, which is the one that can spend it. + +The UTXO set is an ever-growing data structure. Currently the `cardano-node` +uses a fair amount of RAM, but this amount will keep growing as more traffic +takes place on the network (transactions per second, transaction size, block +size, ...). This is bad for decentralization and sustainability of +the network, as eventually only powerful machines would be able to participate in +it. + +To improve decentralization, a decision was made to move this data to persistent +storage which, albeit slower, is much cheaper than RAM. The Consensus layer is +reworked so that the flow of data now allows for UTXO entries to come from some +backend storage, which might be on disk or in memory, trading memory for speed. + +The UTXO-HD feature provides two backends, which can be chosen in `cardano-node`'s +configuration file: + +- `LedgerDBBackend: V2InMemory` +- `LedgerDBBackend: V1LMDB` + +How these backends work is shown below in this document. + +## UTXO-HD design in Consensus + +> ℹ️ We are going to focus on Shelley-based eras, ignoring Byron for now. + +### The `NewEpochState` data structure +The Ledger layer defines the data structure that holds the state of the blockchain +after applying some number of blocks, the `NewEpochState`. Among other things, +this data structure holds a UTXO set, which is a `Map` from +`TxIn (EraCrypto era)` to `TxOut era`. + +In order to apply the different Ledger operations, there is no need for this set +to be complete at all times, as only entries consumed by the transactions will be +accessed. When given a block or a transaction, the Ledger code provides functions +for getting the set of keys that need to exist in the UTXO set for +that block or transaction to apply correctly. Taking advantage of this, the Consensus layer will modify this +container such that it only contains the entries necessary for the Ledger rules. + +### Shelley instantiation and ledger tables + +The `LedgerState (ShelleyBlock proto era)` data family instances are augmented +with a new field which will hold the entries that will be extracted from and +injected into the `NewEpochState` before calling the Ledger rules. This new field +(which we call _ledger tables_) is a container-like structure parametrized by +`Key` and `Value` type families.
+ +```diff haskell +data instance LedgerState (ShelleyBlock proto era) mk = ShelleyLedgerState { + shelleyLedgerTip :: !(WithOrigin (ShelleyTip proto era)) + , shelleyLedgerState :: !(SL.NewEpochState era) + , shelleyLedgerTransition :: !ShelleyTransition ++ , shelleyLedgerTables :: !(LedgerTables (LedgerState (ShelleyBlock proto era)) mk) + } + +data LedgerTables l mk = LedgerTables { + getLedgerTables :: mk (Key l) (Value l) +} +``` + +For a Shelley block, these type families are mapped to the same types as above: + +- `Key (LedgerState (ShelleyBlock proto era)) = TxIn (EraCrypto era)` +- `Value (LedgerState (ShelleyBlock proto era)) = TxOut era` + +To instantiate the `mk` type variable, some _mapkinds_ are defined: + +| `MapKind :: Type -> Type -> Type` | Container | Used for | +|-----------------------------------|-------------------------------|------------------------------------------------------------------| +| `ValuesMK k v` | `Map k v` | Ledger states passed to and from the Ledger rules | +| `KeysMK k v` | `Set k` | Querying the disk for the values needed by a block | +| `DiffMK k v` | `Map k (Delta v)` | Carrying the differences created by applying a block | +| `EmptyMK k v` | $\emptyset$ | When not needing info about the UTxO set, or the values are inside the `NewEpochState` | + +The actual invocation of the ledger rules makes use of a `NewEpochState` which is unaware of any of this machinery. We use +the `stowLedgerTables`/`unstowLedgerTables` functions to move the values between the ledger tables and the +`NewEpochState`, making this completely transparent for the Ledger layer. + +```haskell +stowLedgerTables :: l ValuesMK -> l EmptyMK +unstowLedgerTables :: l EmptyMK -> l ValuesMK +``` + +> ⚠️ It is very important to note that `EmptyMK` just means that _the ledger tables are empty_. This says nothing about whether there are values in the `NewEpochState`'s UTXO set. In the Consensus layer we take much care to ensure that the combination of `EmptyMK` with values in the internal UTXO set only happens at the Ledger layer boundary (via `stowLedgerTables`). Any other instance of `l EmptyMK` will mean that there are no values in the tables nor in the `NewEpochState`. + +### Interacting with the Ledger layer (high level) + +The Consensus layer invokes essentially four Ledger operations: forecast, tick, +applyBlock, and applyTx. Each one of these rules has different requirements on the contents +of the UTXO set. + +| | Requirements | Input | Output | +|-------------|----------------------------------------------------------------------------|------------|------------| +| Forecasting | Doesn't use the UTXO set | `EmptyMK` | `EmptyMK` | +| Ticking | Doesn't use the UTXO set but might produce changes to it | `EmptyMK` | `ValuesMK` | +| ApplyBlock | Consumes inputs for the transactions in the block and produces new entries | `ValuesMK` | `ValuesMK` | +| ApplyTx | Consumes inputs for the transaction and produces new entries | `ValuesMK` | `ValuesMK` | + +When ticking and applying a block, the Consensus code computes the difference +between the input and output sets, producing `DiffMK` tables. The ticking and +applying steps are executed in sequence, producing a `DiffMK` for the +combined operation. The Consensus layer uses this `DiffMK` to influence the +values that are used when dealing with later blocks.
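+
+As a rough illustration of the flow just described, here is a minimal,
+self-contained sketch (not the actual Consensus API: `Delta`, `Values`, `Diff`,
+`diffValues` and `applyDiff` are illustrative names only) of how the difference
+between the values before and after a block can be represented as a
+`Map k (Delta v)` and later re-applied to another set of values:
+
+```haskell
+import           Data.Map.Strict (Map)
+import qualified Data.Map.Strict as Map
+
+-- Simplified stand-ins for the MapKind machinery described above, not the
+-- real Consensus types: a value was either created or deleted by the block.
+data Delta v = Insert v | Delete deriving Show
+
+type Values k v = Map k v
+type Diff   k v = Map k (Delta v)
+
+-- Compute the delta between the values before and after applying a block.
+-- UTXO-like entries are never updated in place, only created or consumed,
+-- so comparing the key sets is enough.
+diffValues :: Ord k => Values k v -> Values k v -> Diff k v
+diffValues before after =
+  Map.map (const Delete) (before `Map.difference` after)
+    `Map.union` Map.map Insert (after `Map.difference` before)
+
+-- Apply a delta to a set of values, e.g. to bring values read from an older
+-- state up to date with the changes of more recent blocks.
+applyDiff :: Ord k => Values k v -> Diff k v -> Values k v
+applyDiff = Map.foldrWithKey step
+  where
+    step k (Insert v) acc = Map.insert k v acc
+    step k Delete     acc = Map.delete k acc
+
+main :: IO ()
+main = do
+  let before = Map.fromList [(1 :: Int, "a"), (2, "b")]
+      after  = Map.fromList [(2, "b"), (3, "c")]  -- the block spends 1 and creates 3
+      d      = diffValues before after
+  print d
+  print (applyDiff before d == after)  -- True
+```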
+ + +### Managing the differences + +To ensure the properties of the Ouroboros protocols, the Consensus layer needs +to be able to perform rollbacks on the chain of at most `k` blocks (which in +mainnet equals `2160` blocks). Because of this, the differences of the last `k` +blocks cannot be considered immutable and therefore they cannot yet be flushed to +persistent storage. This same principle is the one that dictates that ledger +snapshots (for restarting the node) have to store ledger states before or at the +immutable tip of the chain. + +Following this same reasoning, the way differences are carried around changes +depending on the specific backend used by the LedgerDB, whether it lives on the +disk or in memory: + +#### On-disk backend + +The on-disk backend uses the concept of an _anchor_, which is before or at the +immutable tip. This _anchor_ contains a full UTXO set stored on disk, in what we call the `BackingStore`. In +order to get values for applying a particular block, the Consensus layer has to +read those values from the anchored UTXO set and apply all the differences from +that point to the tip of the chain. + +This means that to the pre-UTXO-HD LedgerDB that held the last `k` ledger +states, a side sequence is added which holds the differences resulting from +applying each of the last `k` blocks. This sequence is held in a `FingerTree` +which contains the combination of all the differences that can be applied +directly to a set of values. + +The Consensus layer will periodically flush the differences between the +anchor and the current immutable tip to the on-disk backend, advancing the +chain. + +#### In-memory backend + +The in-memory backend augments each of the `k` values contained in the LedgerDB +to hold a full UTXO set. This emulates exactly how the LedgerDB looked +before UTXO-HD. After each operation with the Ledger, the resulting differences +are applied to the set of values on the tip, producing the new UTXO set. + +The memory footprint of this solution is almost equivalent to the pre-UTXO-HD +one. There aren't `k` separate UTXO sets: thanks to Haskell's sharing, there is +effectively one UTXO set, with the others sharing most of their contents with their predecessors. + +### The forker abstraction + +In order to perform operations with the Ledger layer, Consensus defines the +`Forker` abstraction as _an API to evaluate forks_. Forkers give access to +reading values at a specific point in the chain. Its implementation depends on +which backend is in use in the LedgerDB, abstracting over both of them. + +It is important to note that when using the on-disk backend, a `Forker` will +maintain a consistent view of the _anchored_ UTXO set, which means that writes to +the anchor are queued while the `Forker` is held. For this reason, `Forker`s +should be an ephemeral resource, released as soon as possible. + +### The mempool + +The mempool behaves pretty much as a virtual block. The design is not +particularly complex as we just ask a `Forker` for the inputs for a transaction +when applying it. + +The only caveat compared to the pre-UTXO-HD implementation when using the on-disk +backend is that some more re-validation of transactions will take place. +Previously, the mempool cached the latest ledger state and therefore we +could run a separate thread that would sync the mempool with the LedgerDB and +revalidate all the transactions asynchronously once the tip of the LedgerDB had changed.
+ +Now, we might not be able to +apply a transaction if the `LedgerState` on top of which we had applied the +others is gone from the LedgerDB as we would have lost the differences from +the anchored UTXO to that particular state. Therefore, adding a transaction might in some +cases trigger a sync with the LedgerDB and therefore a revalidation of the +previous transactions. + +It is important to note that the old behavior (only the thread monitoring the LedgerDB +would trigger a resync) was not crucial, now there is just an innocuous race +between the trigger that monitors the LedgerDB and the process that adds the +transaction, which will result in the same final state regardless of which of +those wins the race. + +### Ledger state queries + +> TODO: revisit, I think these are much much faster now + +Most of the queries don't require the UTXO set, but there are three in particular +that do: `GetUTxOByTxIn`, `GetUTxOWhole` and `GetUTxOByAddress`. We assume that +`GetUTxOWhole` is considered to be a debug query so we don't worry about its performance. For +`GetUTxOByTxIn`, the query is fast because we are accessing explicit entries in +the UTXO set. + +However, it is `GetUTxOByAddress` that poses a real problem, as we need to query +the whole UTxO set, apply all the differences to it and then traverse it +entirely to find out the UTxOs belonging to an address. This query is quite +slow even without UTxO-HD and in fact its usage is already discouraged. It +should not be a responsibility of the node to maintain access to this if it is +not needed by the logic that runs the blockchain, so the plan is to move this +into a separate process/client that runs an index of UTxOs by address that can +provide fast access to it (see [#4678](https://github.com/IntersectMBO/cardano-node/issues/4678)). + +### The `CardanoBlock` + +The Consensus layer is built around the concept of blocks, and for the specific +case of Cardano, a special block is used: the `HardForkBlock`. A `HardForkBlock` +is an n-ary sum type, which contains a particular block out of the list of +blocks that exist in the Cardano blockchain. + +On the outside, a `HardForkBlock` is made in a way such that its usage is +almost transparent for the Consensus layer, just as any other block, however for +ledger tables there are some complications. Revisiting the +`LedgerState (HardForkBlock xs)` instance, it is easy to spot +that it is an n-ary sum of ledger states for each of the blocks: + +```haskell +newtype instance LedgerState (HardForkBlock xs) mk = HardForkLedgerState { + hardForkLedgerStatePerEra :: HardForkState (Flip LedgerState mk) xs + } + +newtype HardForkState f xs = HardForkState { + getHardForkState :: Telescope (K Past) (Current f) xs + } +``` + +So, in reality, when holding a `LedgerState (HardForkBlock xs) ValuesMK`, it +actually contains a `LedgerState a ValuesMK` for the particular era in the n-ary +sum. This implies that the contents of the ledger tables are mappings from +`Key a` to `Value a`, which change on each era. + +However, a value of type `LedgerTables (LedgerState (HardForkBlock xs)) ValuesMK` +will hold mappings from `Key (LedgerState (HardForkBlock xs))` to +`Value (LedgerState (HardForkBlock xs))`. When defining these type instances we +had two choices: + +- Make `Value (LedgerState (HardForkBlock xs))` equal to the `Value a` of the + particular era in the ledger state. 
+  Aside from the complications implementing this might impose (in terms of
+  type-level machinery), this would mean that when transitioning from one era
+  to the next one, the whole UTXO set in the tables would have to be updated
+  to translate all the entries to the newer era. If this set lived on disk,
+  this would be prohibitively costly.
+
+- Make `Value (LedgerState (HardForkBlock xs))` a sum type that can hold values
+  of any era. This solution makes it very easy to carry `LedgerTables` in the
+  Consensus layer, as values do not need to be translated; in fact, values from
+  older eras might co-exist with those of the current one. The disadvantage of
+  this solution is that injecting the ledger tables into the ledger state (i.e.
+  `withLedgerTables :: LedgerTables ... mk -> LedgerState ... anymk -> LedgerState ... mk`)
+  implies going from hard-fork keys and values to keys and values of the
+  particular era, making the necessary era translations on the fly.
+
+  This tradeoff was considered acceptable, and because of it we take great care
+  to inject only small tables, such as the set of values needed to apply a
+  block (which is bounded by the maximum block size). Developers integrating
+  the UTXO-HD solution into other tools should understand this limitation and
+  take great care not to violate it, for example by injecting and projecting
+  the whole UTXO set on every block, which would simply blow up the memory
+  consumption. A toy model of this design is sketched at the end of this
+  document.
+
+It is important to note that for any era in the Cardano blockchain the
+`EraCrypto` type family instance is the same (`StandardCrypto`), which makes
+all `TxIn (EraCrypto era)` keys equal. Thanks to this, we can define the `Key`
+for `HardForkBlock`s to be this same type, which we call the `CanonicalTxIn`.
+
+### Storing snapshots
+
+Before UTXO-HD, ledger state snapshots were CBOR-serialized files containing a
+full `ExtLedgerState blk` value. Now there is a separation between the
+`ExtLedgerState blk EmptyMK` file and the `LedgerTables (ExtLedgerState blk) ValuesMK`
+file. This means that snapshots from before UTXO-HD are incompatible with the
+UTXO-HD design, and replaying the chain will be needed when enabling UTXO-HD.
+Moreover, snapshots created when using one of the UTXO-HD backends cannot be
+used with the other backend, and will also require a replay.
+
+| Backend | `ExtLedgerState blk EmptyMK` | `LedgerTables (ExtLedgerState blk) ValuesMK` | Live tables |
+|--|--|--|--|
+| In-memory | `/ledger//state` | `/ledger//state/tables/tvar` | N/A |
+| On-disk | `/ledger//state` | `/ledger//state/tables/data.mdb` | `/ledgerdb/data.mdb` |
+
+In the tables part of the snapshot, the in-memory backend will store a
+serialization of the `Map (Key (CardanoBlock c)) (Value (CardanoBlock c))`,
+whereas the on-disk backend will store a copy of the LMDB database.
+
+## Impact on the node
+
+The **in-memory** backend should have very little impact on the node.
+
+The cardano-node will perform two operations on startup, and each of them
+suffers a different impact with the **on-disk** backend:
+
+| Operation | Impact | Estimated time difference |
+|--|--|--|
+| Syncing | Low, cryptographic operations dominate the performance | 16h vs 17h |
+| Replay | High | 2h vs 3.5h |
+
+As for the behavior of a cardano-node that is synced to the tip of the chain,
+the impact of UTXO-HD should not be problematic because, given the pace at
+which blocks are produced (on average one every 20s), there is enough time to
+perform the UTXO-HD operations.
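+
+As an appendix, coming back to the hard fork ledger tables discussed above, the
+following toy model illustrates the chosen design: the hard-fork level value is
+a sum that can hold values from any era, and translation is only paid when
+injecting tables into the ledger state of a concrete era. The names are
+hypothetical and heavily simplified; they are **not** the actual Consensus
+definitions:
+
+```haskell
+-- Hypothetical, simplified stand-ins for two per-era TxOut types.
+newtype TxOutShelley = TxOutShelley Integer deriving Show
+newtype TxOutBabbage = TxOutBabbage Integer deriving Show
+
+-- The hard-fork level value: a sum that can hold values from any era, so
+-- ledger tables can be carried around without translating their entries.
+data HardForkTxOut
+  = InShelley TxOutShelley
+  | InBabbage TxOutBabbage
+  deriving Show
+
+-- Injecting tables into the ledger state of a concrete era (the analogue of
+-- `withLedgerTables`) is where era translation happens, on the fly and only
+-- for the (small) tables being injected.
+toBabbage :: HardForkTxOut -> TxOutBabbage
+toBabbage (InShelley (TxOutShelley v)) = TxOutBabbage v  -- translate forward
+toBabbage (InBabbage out)              = out
+
+-- Projecting back to the hard-fork level is just an injection into the sum;
+-- no translation is needed.
+fromBabbage :: TxOutBabbage -> HardForkTxOut
+fromBabbage = InBabbage
+```
+
+In the real implementation the sum ranges over all the eras of the
+`CardanoBlock` and the keys are unified through the `CanonicalTxIn`, but the
+shape of the tradeoff is the same: carrying tables around is cheap, while
+injecting them into an era-specific ledger state pays a translation cost
+proportional to the size of the injected table.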
diff --git a/docs/website/sidebars.js b/docs/website/sidebars.js index da14732285..e464501c35 100644 --- a/docs/website/sidebars.js +++ b/docs/website/sidebars.js @@ -23,6 +23,7 @@ const sidebars = { label: 'About Ouroboros', items: [ 'about-ouroboros/index', + 'about-ouroboros/utxo-hd', 'about-ouroboros/References' ] } @@ -55,6 +56,14 @@ const sidebars = { 'for-developers/HandlingBlocksFromTheFuture' ] }, + { type: 'category', + label: 'UTxO HD', + items: [ + 'for-developers/utxo-hd/Overview', + 'for-developers/utxo-hd/future-ledger-hd', + 'for-developers/utxo-hd/utxo-hd', + ] + } ] }; diff --git a/docs/website/static/img/utxo-hd/utxo-hd-replay-01-19-23.png b/docs/website/static/img/utxo-hd/utxo-hd-replay-01-19-23.png new file mode 100644 index 0000000000000000000000000000000000000000..9f92614ee682f74b40ad6626ff4aad72ccd86170 GIT binary patch literal 75262 zcmdqJc{r5s8wW~KAtX{6OQ}eSO2%ZZP)S22AwtPWW|#>fyRs$pjgrPv2}#+e82d7! z)Qn1&VJt(DZEV?xIqy3|+4`M7&ULP{Tvy+(mgjxm`?>e~bKlP!d`9mCFSiIc8yg$1 z&PlCvY;0WRY;0>@tm6Rwrc2;f0UI0p!851NYcm)O;BTHjeG2|4VPo@Ql&~?_Xta_e zY#%p@VPiYu!^ZGocr(5y?%4x8dZg_O@SKQP2(b3U39Im9KfM;}+^3-P=Baw~fM~h^uX@1&#|I86XJ9!@=HXk7&pCiC8A0NvSA4`T0NdFPW zQ{a~+<0+a^Qo;bv!zdx<#0@eApJo$F8KuOM3=hVUr;KeyMMd%P@e&det*x!a#l;pD z78)8FG@)Ikz?r$wCrupL*!ar9|JWby<%6)XZDZ5XI(ptUrZ1Kl)?SsprvIq!wAlSy zH#+x?7D!Y;CuS_D_p{kYa2pvvs@A`hf8m~&_cBc^_uEMfYQN#GI~$Iz+x~yk|Jr-I zdylxgPCgy#A1$D3UsC%9-bLipk{-M(~W9i~%TQS6sw|>wzU-Fiv zM};Sk$zr%IoVT|h4?iv-4K5W(a0t|RGBwd&`cS~-qAoM5uBi%@` z>egQQhfuZsGY#c~5C;L59{$_JqjPV(d{c^cM(L8^l{6H8bEXr^wNGsupFka_Rtms%HDdA%6qw`Mb&I7Wi5tN;R0@f3&Q3!V*2uv-tc)Q&K>1|nO=Y6km$wDThin7 zY})g(2a|xEXR4#$I3K4X2>hX>|8D_2uw-yTEo8IR9w!h_}IrBF=&98aQ(ck+Bp_S!heU%b zKS3bJ&Wt^rCqM{OdH)QR6WP&EwI2_*Ys}LS?;RUU@Rg%%+)3em5!);+S5P@hRmTuR zrpw_PQ2LYZ9~Q3E=D}O})SZ_ml34p(*|%3+N0FSI%FWcj*n(O7l)-d+{#_XO=XKoa zRj;{n9+-|L{M*QaR1Xo7*T=Omo-!@b=c{-t;~w+3mS2Alcpqn4XUR2-W+ffwZ?FD5 zZ$UF9*Y0QzIf3ogoONPRvZ^GrKV85`jAKMUI#<2%JKW`EV13yr9HGsmWawng`O#+S z4^E>)?-K~gpJS$|Lov6QRF$=tBAPW?(1rEGCcCe+9?N#8J;8-gDaxnBFW z#{FzOi^5~u9-lCCbZa`+Op=Jbr1#T+2c*nnhyGWE5PZ$=Y=6xraYQ(&rJ*Fy`eT-F z_lJrAYmS=#LU})}N!kH{JmNjQ)?OhTFVh@0I&`BEmG}OntBNLLtHWvk@O)WWF>*r& z(o#7q^D?=_>8ce0*x)`Y&+&~|81+&zVLUmi+#kWCG*hUqoKKb)AI66ezAfDP1QaR@ zdCW&nl9125X;toQRW4ShVvs+Dc6S=fGdnd|g4k zdZ=b-SFz}H9xa@jU!#=tDFx*?d%Tq>{<7jzW@Y!+DSD;tt*c`@xcv|`6Get5@&8c$ie6#o#ucI(QCAC{Gr;}9takpGF9`If0fR$Wq z3->}4Xfh3Ff&OO|XFfwEkJ$M-COB{Z@oGe=2fbrc~kVL zjtzbz5;|*BF@Qb|p3qQe+0Hcb1y<%Smn@!K9Jlv1il6GBd5<^!QNEVhI-I78d)u&l zT6ln?!}e7J-aEx!R{&~XA~EyXHE@w}0wyOEj7P7bHRL#p=_NXM^pe9yKZFG3tL~@! 
z)e9&w{(Ci*A1ZD7KTcDnW!CC~!Y4I`4=DEQjSd-05AiMapwGIWS~AtOj8FT~ z0tvIc>=&mqECszBD7oAzeJYzULe>*VnR%r9cg&>VYwTA!pF^I%(lKW zSC>Zt_*zb;%51o~J8RC-|24h;E5`l5j~n^*uxhQQg+zcx{;i|NZv%fEmuDGOPqe_% zr8WQ)sOR^F4oIdQj%CPCu0<9GFvaxJ=J@KROfJ znsX<)L3UV8)A1oY0*^(XUh_2m!xj=>e_XPU)d)lB3JIyzsHApHD3eL_wnUYNS(g2`)oot_Lgfw_mjM>-oMU_!(CqGf8q~I56*Z0F*RcQ z?*nolH**!Bq{J81#OF=9&UWnAq72Ff8mP>kF^bRc-y$yNjlPSY+)*E|&%+efnd*D% zmie&#?Z;fj*zbL3WLCH+(HMoWYlTyXw zln=u}`E>dR??ZyREkvIq4GL*)r(7}4df*U z<!;M7H<(v_YXTNnbCPgvDW2kn`zL=_XO(2!z+_`C0;6ex=p&KNqMLmMbJ62 z%?bbO(c?^$%^3*LsF26r>d7`3*3S)Rr2_wW*>MWfW~cUt;M44rLV(OUY@Ox&PvGZgon& z?-H2T{lsx-*S~#@%p1oLyuB+Yg(}xRRn*g)jq05p-h;O*AEjzFPs&#gRtLCf$24|X z6!ZVvf4n8_YbUY0kg{kp?Lfj&*;^e7)Svre+ZHo>Q*)yg<%| zx>ci}gSIVA9TJ@p0xq+~dDs(X_2v!-HfjhOC3@=<*#7N}F50+`bPqN7!|cjs88;seR|Pn9|N7N%stK(zs+5v$Utqj@UX_WI~shq;FNR5%Zdww zZLF-_%J6(;QQGvU6Agh5r=tv-I_aTBST*!Cr=gJ&;Zwd`OpKnZ8kDpv_|;ie@zaK5 zf|%s71~HqvwVC~u#Zc!OG15=>SzJ$A&G~Ze%gZt(_$=TZzNmY)@*1A4O&-*Ck;_lw z3C4ut$G(U)892LJ(m4qdglK$HQ4!vL%=#P+()C2kH%-kM4&h09+T>~+y`C{s(|6al z=3XmA-yzOme9Hx=yh8G^mATW}@Flxu@?A;p*Su#p^GtlqB;{htxosL~RafufJ0@P` zp(}b16|D!}bc*?=+7}nZ!sW#aR~x`CKc?)ivOZ6oP%iD9D#TR5)kI|4AC)c_p=6Xn z@>qAZ;@9lJ!JLlBt<8$<$2>$E$sB~@EiJLPh$S7=82{#usBYKmF#!CeS4J+4WT1Eh z$6&91&%h`lHJB|HTYesHa|-#-e_pqc8I#jWg%#zM)fIw|NDwSz_EW-vp|iNHlOvU; z9mi&acx>9r)Q$#VUpjZ_wuD*57T(2w;^>fviH9e>=1=s(j@s*{ZZ^|izlJ|p)X zrRT+NwCXX7zuTGYa&gACg*-bkoPq~K0$(<<#D=+j6=OvZ!1hQR8nnh<29$IjI zU;|H5kC1!98x8^l5>TqxzpGX}mUgyW+j34FOsq#-%5*y;Y&!ULyWSDKjn+XEgBs|$ zN5ANhMo&}Fd9PCS4V9deVIW#+ugRvJ&{{4 zvACV%6-ifPdiSV=LjdYo>4%uOX9fh~1 zr(8*Cw1#nN{{!N0+V||k-d(>%G4*RJEYQ;(=OqFx>L{Mkk-vU^@7Z{Q=k~|gq2j~z zaR-;%?jEr%YNHO@?@Y|N@4nZHE=y6lqs5=E!_lB%;%23oN*|hLV$Js+8 z)8(;o^2se=ovQM)3QbBaE|PX0`^ncM9p}}K9~zb-GhHE0~ib|p&vkHZ894LcH!2QZrHzyc7<^H?e^bIr%x2L7h%$LoL*Lj z+ljR_WWXLD-0#^}_pHC7e*IXz%NVr3Iscse0;U$=J{qHIlZjFavSO!;c{30r`Okkh zvDv|7nbt#?-kKENd{O+gmeAibv_stlX*fiL*nPe|ZL6M=E%iz+vGcBFmDUUDqz33= z`2_QcNV1UcGlDXQi|+x5c`@()0K%Q6fQT%1BiFbLPGoqUg% zmu3f$r-aN#x8r2AZDXeu(1|FT49d}a_RU)&$NuGI-pIG4Gi9FT$f~pFyShd8*ui$8 z;?kdHAy&d&s-fCx33^{_3?i>6%Ui6e6w#Bx)~EYg5m&16#8IcDL_MMYG6`0N|M@D$ zY=n+sw;1DOo;kW+5A@0lu^*>z=L*+=dHf;_)>IB2;3XuheJ(!DwIm^+p{k@r-J$Pg zo(j6{0LUYwo(~+eCI)Q8bV!RHa@b)%@w+yuzp|!EdiPZFhf7S;_Iq}gP6n1G*3|S1Zb%B^ zi68aK^HlBr*d1}ogjv`BsWueI+cxqkL%5@zguS)_=Q@mh)cP?>5qkwlfGhazB!1~r zaX(U%9D1ljNp_@YDE-Sb8kTlmKUYabu>O0pE(Z`Pm!5SErst;~EH4WFp@ebJbtqJ4oMdr2}t_R+n((_hcOUbRz+kmmkRCHFF zJp`z?jr-dMapBGI`De2oLwWn|U)?2GjIJN&D@uIp_1?`+WPTCvjXE$O0$m zYBL|jm>*Cqhg=Cr4CL?$$|R#FO~gX5{&Xu;(%#iVQ>j^Ef*wnbVix1?Hni(*6-;~U zzq{(EsF^LlaFpNP{_O|PQJn8HS zu;>hUs*t}|v;+}=l-yZsa#I0s2F=&8!u6xpn?pbPEJJ&KI@8R#jVPUbWUZ6QIHk5# zucTYCCRBAYYE?5xFh`;kR509Km&BH;)Tl<^?t$>)p8*e-|1)5a|!G~j`T*j=yZEtqJ*avRA530v$^mPSSbgFwxJgGz^ z3@M4M*KylAy2=h_hcZihZhm6L*2 z0OXr(Kixryl3C^Oz?cABNyUt04{sfEn?QYi^Y5MK zW9TlD9xtA>xwJVx*siBn_a|Z*Dc!OsVU3__{HGTOszxSKRNcTX5WXEa4sW`T3Pszj z)yT6s8`bRMqTwxSu>*ij-9;?cC`}*SwW#WgfZOPleFcTa<8)(_nKp!EhusK}1xAT3 zpVYiO_q+J+8$CkTI`R$D&$_6_>_~T~A7r7_N9Lft2pt;})*cQ{S+Ss`y2wc%Wbj;d z$o&HKZ)=~W11XJp(G#y(SJk-A%({0oBTqu1evb0^PLM4QnyL%AYcB2 zuQre#IgA&mLnEO6pljllY9}#_Tt?cxJM=h&vr=I4PE} z>Xo&*)1LEoUE$abm>2e&z-Idv_r7E0iqwE&VjrTo|0kk2r)h#j+h!EoLl`{cIKD&! 
zD~xO=oxgK|_DuEHO96?Sywnr=@)2)vmVi>x*=Jk=M*eH;>?QJ@^MIskl;d66=8J@s zoFrxfXnkstI#(FlI1@@isf8_ypYM z#T?BmaBi?1eH|7i33^)QTD|iqhi1vL>aJ#ih)GpO{ z`I)40NpW`)fIX#N`lX)Y>#CjJa>kSs=i8luWbx=VF_2j}L|m|$W^sIEBMG2XJHV<% zXEn}{18k0dVj)W(8a-J3P8sK$(xSlY-8wI#*T?cqqOOelO?Tsh%Yd#TE2M*sMzG5& z6|3jReE&El)^BgRf^<4DA3HhwgIwUBNXTOOuq>zqv&{?N6T6e^1SAZJP&L<@$t0Md z6|Yk%v}X@qdruNkn+N7rc7`qTtFw31qy=AQD%RoP+3PX z3<}sDZP)Pa0NfPES?@2%I>NHkBck-mdS42@rqYpEC70XN+X*IKyv`N;(L|`RvMhwv z>#!TS%)|@&zUs81GpKY+?6}hJ87>0(Ji4~}F%hwRB@l}&K=EtTBECu=^q59xuQ6+D zvcd5@S|%w|XS0oA&=?6@+;7cyMYmw?+-Xz8jU2iHoYJ5GTjfe`wP}Rb-naaUBIb&MG~L*@OX5f&#g97#-0Zo@k`J3x^_qw46^Z7 zxPxu0TA?voGmkzrqUs z00{VqK6mLT+^S>MN^5V3F_<3YCYH#)dEzb=w%V+1FQS8UxnTX3x9XMgW(17XLfu!% z4Gs}7-1rhEPOyx#pPUZv0&Jkw4%aJ6`C!9enc4PAKRy&F-j?!8_}33*9w2sgsi3lp z*VDqP#lk96+p?P`z)^*zb~tiD+6D*@BmU4}KvWG>{93a%O`C|d%SG$pG%N)WV1bB` zYg;7&i;)G+Rzm)9d%0bmmv#ZSghOq4(x&j#2@B0jRuP+kv5KXN4KbCjL{Z}aQcaOJ zDZHf4+W`@ZZ7q#g1A%#rrKVl+P38gmE!D$6RJYVnLP0bNJ87RSv!Vd(Vh@68#K z6`MBmMxmAH>@-jQvX^QP2sH&d%V~rOG$?4>Lql#@?f{~DBjrF%4S)Ml0L}gbT{Ygs zb^|K3+yE&~Y=EO8b&-GU!QX|6mH^Sa1$kuohPl7Cf!4Rr?J1FY7{c=hav&)`W@@t#R@ULF>*|A|W+j~4aM6~) z=!h|N8j`Fg^<_}=4`R0m2tSkSu#%ic+XEEbckE%-03fCLkmPQIYZK4rkkhJwhk*0( z@PR+`;fWL{&B)-tLB~Hxfny-Jd`zR9e}Kds1wP1YT!A8b{y8$veD*JXq2uJ9m*u?L z&6H+mCPr_RI&96L-_7}y05{DcPHduU&4XZDum`C7l{vC9b8}=w(7sm)Zj*ap!xtWc z(>|BU6eUt-zm0&4a$L8$>fbRYzE5IvAPGliZjAr!U#CA*AP|5IU!5alwp98w?_p!T zx(M?mDNozX-F1T8n?Tr+jOI(TQvAka?8gdmYc4zH-@p4MR}z8^fs<2T8gZ|e9@abG zdEo$ybNZnpq8K==>*i~0Z$dMAV;UDj$Xn4+VDoJd81^C3-=W!Jw`OH-MQ-CXvX&rW zO@rBTxH*{qCRoeNv!{3ak12IC(?Ydduk)$i)iN%Ytvl1sv2fZXJaMLJ>)l?;b_llh zsb6XqpG~PvV@>wm)vV`ZgL|)&#Q1YYeMB5)Jy8G*X??SupT}YdA44L5X61ye9H&v^4y7m`pt!svI4Y?B)lmWi zUze@v?iNAlfIucPWKB51$PSl*bdZWf!gxy%H~IK16)FvFCS~xV&{QCY1YDKWMhqdf zO>Y!U>i;va74B`-<0C1z3WP(gk)Qc(zMzqJn<$sG_SrjzCy)ZF-!khLz}m3W72TwF zH(}y|QDx4f-LRs}?T4CDV%3-fT=R!(Xoycwq&7spbMx3LY6rCkAPyk41Cl4!23s|3 zQ)vO?3Zm0Pgm-+@*p%kn&a`ln`c0njIT|KW&n5nwstFWKcuEX$IhcXyc#azbs~jNd z^j+dav3<}X{agl>5PYtxbJYE;2~H;+SI9~hmaY8^9lA43CAet{Kt}8(Uz1$u zV}xyHnbV?N!UXl)=-gVbL3GQ5R;insusY<%o<3f%hGxlWVKxO<$BuIv%}}Va+kq@f zWhx61anfd72LA$@a-YfdMOoSnvg|2*ymE~7)wd>t(EvEJ^NY?Vv@P?>TNKZK zWSTriYdFGmKVBVa_4E5=gNyW3mf^53hF@QS7oyM*L7Ft5&0zpa@|N9W2UT%8x0a#~ z;a|QM;)2}-kPKv9Wlf0Yu0|XJ2k#0$QG&|GXfh?WCej9lJd@(x)zZUUiv+|^V z$$KRC3IgBxmOOh353x{*S>2bRBZ^i*sp9KDjtd9uf+n&oc#$qWAn)9b@39MvxDyKH zW6vQ}gkbfkUhWy&S0H3(mQDoPLODhYnK{ITe*+CWt^9Ku#jI49vRTR>{>m@42m|Xj zw(Y2QFhv$j!eQ(#{$N$RbS)x>*eWFNpFeC((5d2vB2T+r)mTB$ zaXMcdWSlpys6CJ1!G5Xg*07d>qVRznqLg9ru5UWQthuAb{Um+=~q zc(10{9dl!oi(gM63(4WR)kHOfTPnsQY;bKm`W$;)S+jggEJ6ue{8KPYMO5Cr*#B81 zQRGo09W#()sO-iXby~u#ubuMG+xB##V~lGYW}&HW4iPe~k9vM+u|}{LqYQq$JKI(j zP+c?fTFp?|0`ERXJMcJVtw==iQBEUjK*X$(eXo){R%{D&a%A5tgLSO_+~RW7a+BG6 z5xpDO{+Z6pxCk5Uxf#T-$d%r2?zb1ueJIA)392%^FB8s9L!Rnv;)!ivfwe@1C_k$P zv&-EtGIa#d-NRe+d`y-MZ_62)RlhlD1UXX1-igRFDMQ-MnCjlfu`(xigRa*RdEF-p<74w4bjyWB( zSZ0FqEmZ)rciJW-RJRzFty9(9q#x2mnV*VZBrpA;p1TZK zued8oKikdagat@~1M0?T(?RT=0>*F7I=#}lL6L>FOE-p0@9EhDE*8KdyIO_GJFj*J z+GO73p1Ve{kP%%rjj|v*35@VtM`*NNfp!}^j&JzK##-3K_>d}qXQ#uWLd^FcaVs1sJ327G_3g_Il ziiJ`hQ9we4%(St*rvio0zB_SbubJ-zDX|p^C_yyyP!RyVU)c-TXq+lG5wfih8eRm< z4bLoMa1#o;8Q2!?7R%^l2f9gN{m|fPRB|ZKN;|ira0y_iq^d_X*4v5~P|g!Ys2t3l zl{OARjR7D57JeYMe)AaZrQ@y>+E0x*w>R|bxEsNTzfTwCQ4an2c;=9_8n%AA|k}( zQNwXw?luN|D^Zf3sdEVwbeeLn1ND;#={7O`%FI|*(xDYMzua*KTx_lk(eMSDoYYt{rcU30%-S9{FUS`8ktCTy}!QJEC_0HAbz;_>__Z zHkK~qG5VY93v$&$B6d>0X9qU5vTm7lC7@KnUY^uL(yW)REso!;D0Cabky&jsp($J{ z=7}=;)XxV21{Ti!LsLdzx?VUHNe7#Jh#y~!!ao0l)CTB9}r#rk{z0znblO!dx$)DxWyqJdh`sx;f)Y%Xun 
z-%s#~8BXfeQa}aqmW=$4`IRFEbq^|>2z_U`kW8x%vj^9nqb$^wISn4pXi*NJpXi7+ zc{yG0NmBVc&r@yE%j}+9cM>5L#ZL7s7~>i_3ni0ELo*P#bCtd8DNwAK?5%Mbuy-(jvk5G} zbEUqXG^*CWrMA{!g3J5;i860I*LIiJ*un8qTiN_1@lYYmC(CY+FnH0Ymz#}oCTSf* z8}a>66=SdI4RseX0pB;DtmuaVH@9Zd*Q`p<|3L9}co%9g0;dnuu1W3~RL<>~%yca2 z7T&>gx!WTMJ`2EFIFlD0!{Px%U@Nz2rNvui2B~Z|3&Nk=Zx~2d3Wku_;tbMklg7`l z*V8=5=UYEI+P4dH7r#X|RzE}L9H`_D&RL^PlGuJ^uBZIs6P_&hmlTmC@g?;*xzfjJ z_mWqqCKbMDOsFA*cMV@$4MOrB*mGPbu<+B;4#kByGO07%!g7%U2m<#@hEYNI_ob1y zJFj?nR=@8cm%1}&bDXeOKz+07`N59|0Q{St9>R-W4^e3kh;Xnf-^ZII8k1y1ZKkam zpHMDz?`b{OXCsUMCTwYM({>8m*Iwt42*0zTJ$NcO`B$zM#dgY>di93h2VHHw zfycb*q~7>en`g|aCNt~}W+)kXY0$RudO2cmuzGRaB1H)m!(3|HpjWw%FG=*Yd@v6; zbym9Bq#v!?(wtA5?NXS2e;Md-ti5hf1o|q3vj22{DX`7dzW?B-mg&dksaf=4XUu-< zr;r`?L7!+bku#%?c>8!sOdx1NYi%`|y+5764QM57=!?Fb8R*BqH}o=)%%P@{Ak+DQ zCRIqe4jvdVcm;$jNCPZJ)8o&LeB4 zo+x#;IPsgw=jqMGp3N6_-2uwghs_jI%Iqu+E|NtzpW_y5rjT zng?GJ^;ycTxWC4v>eYCh*T)OFC4a8SE{wq#oQVZ`CRd_ueioqC=yOG4Yj&@!6=AKs zN9l#!(v_dfG;tFctvFF#(M4}U#A+iM4K?8{du$dM{DQK-8#QaMhp&z$dbhCW=-F^J zjSvY4c@Uple%6Qv#EBA4bu{$wFv_2_>Smqk73?7p(O7O#dZ)0lKqcRR%!=_cO~d&d zntW6xwd)_-Rw2BBZLdNZKWRC5lKYss3f!*42Sint3w>v%jsdz>`g79BBC_&;PP~IV zvH%22=d*>sJ|%VrnzBmHCk~$L;hr)goP4Y4S_c!=S-xrCTb-MH_N$e#e#x!o-30_{ zdp93ecIA_iZen}iXr0?ck%RDiwH8mju`GrwJ4}p7>lW>M`|<9|R3qPF*5SpV-IM$e z9d?YNv+9~mu^a!f{~AcLPt&ys5z9h&!4^GV*N_GwEf89r55RvRsSAy;HIy~j7pnCT zyJVUZcU=3hM!w0|B_EF+9);$VGO?QFxvehhpb&){t5uNe1k`({ z3gH>xsKvs(54bV94GgIv$Q!?#j#Q9kU8xX6@|jVn?^e=3{HT_QbRo)*<+xkPi2!g zqs&GWX1(&-SR<$jO=|eTcN}?~%(KO2lDo{~-5@0evT>q=*dQuRb+;s2R3>oxX%4 zD{JI&$nmoNX!th+v>>0#yDq^q!E5=LaQt$Vg$o*ar%4qO?w9&t;CoATMwb{&^dA%e zL!{Znij=ljZ<Yw5@u}5qM{1GrPaho`9qPc;NNqu{b}hr5KEL^raSmqB~=weLvC+ z;K5r1+f{N$lz|q}?Unrd3y!_`>qI17?s-c?p+Pq=*LN5_hes~mH2K%*_%zPUXk$b~ z=g5|Gw!M0f1N-W;7alTuKTFkY zbB)_WV4gs=F%pP*e$1lBJ++G2yjV&vPq%T-s5NgFDG6C>=wK58NV)CJ9$-pyc?ghC zuxhD~QjCd)cl)Kb<35U_+50ZAuV|Q@PFEz--iwf~8qh^4L5f&Pa+}nBQ_iC+>8?$E zu(vueO<^J;>69e^`*g( z9-}HIHkYgo8 zr)?~Woh@gkVt?@?g>KnBcJDfGYAH;}T97fs3{L z0P0js&~zj@(sw~mwbj%HUTXL=U}Q=g9rWgT!5W%CIXZ1_e=Re;&|v;(_|}E{Uvi!; zDop&agEK%d_rH_fZ}N&mPTFGE&c``ftfX{1kQ4YX>>Ig!H@Sp1q*3Xcx$tzTO97sT z5`Gw+%?c!yVffb~g7F8i#}>|QxOXFE@O!$@k0>8=K3_0#8JgL;mUNtDyIY{vdgY!K z$Wx^YxIgf8#AU|t{0Z!d+3A7i$($Id<1FQDW9Pp40Ej66S^Ib*h#cv+;QShINUg>0 z+;Z=TF^ss~whA593Y@45^GVc5W73ykeE4JdfrT8X>F(>#c?&hlF2}|bLw^oEA|N>C zUvi<`y6*AB9kS5E=^u?yy$3$J2*HLL`)-u|Yds|+tlGSR038NrNtz%6fRz5mQhKbi zH-lE>_vB0Gd+nM_@!n6ar<%t&H=*lu(v{KYSo&HSgr|rIO6aC93aDSMm={p9cNzLL$+XPFXUHKwOYMRASAZ$diqfSW${^SEt2;rWq>+n9Ph<7b5jrV>8=OjX-=kZZD0nDyLQf)XH z9zY%Za1z#s%H3PTDUEj42s(d>`L@X1eHl+2I_kW;2QYfCw~vB6%V;1wdq&!_w9b zLR@&C!`MC`ToiCf?_g~bw<9g6VzoQe|1!{me=gvn8dt`vMcQt}{O(X5WOkhX#n&?e ze@bBU&o_G6IJB<~_8`znG_OuN3AFQE+OBFdGd7~M$Cb|<4}M7@@F;%qM&G+ z?b@GE7f#ohZV^oT>Pw30y>6{ZIM)A zjJBfGW1MfvH@6`CIT67&a%3RDzg&kHnwec-2L_qrg(nZ#BD2xO_}&B@cBUP;XXyII zK#FRg^LyW5yjSVKH~HK79Xt49&h2goKaLce3m+21w2|3BH%x;hgWpurEO$-@?(Kmy zIF~%}YRc0M`&5BZGGNlr?BttkoG#O<0pBDA=^V-1?!v%tJ$0$K-?crBw&CyrXqlf7 zX6Q&)SK6sP^Ks{)m&km&8ZnlL!X70BtkShX7|d#kx)`ki7B?RHXy4)V> zBZz4rZ(A5z;=6z7;~Bu{+apoZ$gcA&ppr$ujW~u=QR*cQ0M-B2=iXmsjL_-GtdXf* zXcqP?gaJzwQf&ZQGE5n=1`yt#S~Nm0<&HZlV(SQe{i3z9?WeAvK4L>y$QbW0A7jyD zkh3HF(qnRq=;*p~8z!(emKU7InxE#NNy}4CjaKIWr3rQtU~v5XLx$Rrv%*Z+h=D0Tt;dS zB6ST&r*;f*3o1bqZ0a&`r}rKqj57WUBq}`^@;9vrihg+2EBhc%xdl!x?0ZE&?VoW;eF3@|ohdR*K zjf<3Dxb)`@J|w#GbvNwXR32;Eiyf=st$90{1ngdbgl;nPZ+OLoza&Y(=btWiwa(~L z!Fx4-sB>S6WO_4;xqFK*WXhd!#a5CT|XGh7?UPoa0YQ1N}jF)B|BNZTC3vD{i2{!_8xJ;+L{%6_!zNkKd^b%y2haN8|x;`bgxw zmfYx(o$vv!5Q5d~yn{ou(v8b@ebE6%xDb#)U`&`vk6uP8rFSz7vVrW>a!E&QiTr59 
za4Cv;>2J5>J9vHx64`s0cAcXd{#zu-gG>I@^kOJ5*Q2vaUXT;`2QDxCB!;P%|WC94du zEFyYaplg9!{ysdw6>(Y)d)L$82Z;npoUaD}@51?DXIsw!2ykoT; zuHl!6?KHMnmJ0oaH#=UKQO@kIb;$r)&UDlG-q$)yw~Z zD{|!+OKX;JuE(xo-<&A}4)l&-ozg8@kS7c3KSlu+?V<--=!Tp|`@VnNi1sytYEPkj zMww#siqI<`LTI+XAq5CE@h#AA->I(sJI&f#569ltrJF`!xbTt;%+Oi-lwsqMo!fSr z*{^^ludwtodfOW6#k3dh|(^z)6(vxhMR$!lGFIF$p&}-aDX)|;@cE* zw&IS0^Kkqf$V4a?o^65k3B=NwS9<=skR#zl3m2pRRLr}D_TbOCY&>vTO&|PNBIuwi zzRRFR{!H=CDb*CQ^*?mAa+;gQSTY^HqzhH)v}S;8jW^Qw$?9IYh+{M0-zTdIhSnA zx-<6I9J)IP;P`00uTiCH8`o9XVx<&ZVDc+^A)a3 zP`{>WKlt!`P=1UYG1y6YX>gDlhk`L-J#$!D^&g;Z#Poo^GV4@; zrR;_r&uc!yoC%sQ#x0){^}nNcZIrvQSD;RT3AF%zDw0_2y;}_QI)6(OV%CgumyE|4?!Z0UoGJDFuk=GW*aS@se^(#CtASmo#JJUr^JesW-$zKB*gojDDWy zs+B?k);B?L@emn}Lf;v6&Zhn>PBbJ$W7ES$%Xd6nw!Dd6&K333*lehD?CLUxtpHm(V{B%Q-qat>9HayUR)Hu`6g_TpWYKsU-fBef5TyokE;jtVA=e3a z<`IElrkOdJ9ONFjFj)3u<9AbO4tv^f$)*9&8#cl&aK|T>kXanDQ9?SJl>mJ994L z+}lZe-mV+6_HbC^A4T&czM*bVR$4+WR70Dv>w9q4q^z0GU%6R2+QIISco)HYZ2`nq zGJ4Dwk5WE6wo~-4G)g+-CG7SeuymtTNxilCLn{IP&Ej17t4J+X9t*Q{_djW=q{ zn9_vK_+vXiQM15DXBJM)qWe3ip=RH#Fb=E@eZF08mI_k@ZhsU|*_0g@p9vyqU+LX{ ziIAu&Jgy|#2Wri3u?qy?dkrlhrE^y^SdrJrzQ~b~s3I&lcc9&309pWCu;Dc_>k?ml z{5)c-%Yji(!Y&ozh(q(+{&RItP}6Mu#V_dKu=mXa*B)Dfs=%I@5EOEz0VLwULUtLU zirfi_hlK@qvAD`4qFum+;lf*ae8Dt++Y@4|^8x3xgk6V)BNP{qkOd8wZa);WZqcU4 zDV+>bR)8*AHYqPsWB~!+J5#6&7E5y_XRA?WD=xO(C02hyPlvycM0THa|m` zt9hLuWU|s)gi*&K=eEeIUA)OfJ&hfs>G2{Mcex}v6uz+X%C|X%f5DMoFMl0sjf&GR zw}lC?0y()AhO<0&#%0`l-;1Fq@3HYasAJ+Gwt`_y<9k3#x7Z1(L!LT8smVDc3Sotf z1=By%;rH-~8hO3Or<+-davvxJ4qSmUC?<)(gg)~_!%Udy=76`<_R6BT;`aW}DdLQl zx6Vz#$Db?)pWuN~1$ew=!Q+xpP7#`8fB_Rt&p?Vpx*llxYEDZ%e$^-4o;UVg(J{rU zDO?ZU>=Zb)Wmc7O-f_kkphI=Dfs0s3H-wKxdYGhK0BYo5R>5Q>HIms+VGZh>Z9A`2 zsVT;jq~IAr7_O2z)PWh>Q;?K$Jo#B4iC=iop1J5d zT(}L+vBYy?-|;L(N~hJ=9n@>p>&aT%fb#On7O1}nO(6i7-erdL7HzY2yHp;((rm$& zJ3BWy!SBPm%oGZfub@R9-1zV*E!{>wc)BF$)9<8!l5DH5KMw(^Ip~o=nmXRWQv#-g zsFBB*%8gYLa9LM~SZm;{tUBv%lt(3%1yV&V-F4obNOo73YtJs;X5a6I53PJ|L?{Kl zB-^*i#GsI&Z%M`k&ocDfcs*RTov%atdt&JiJ*A0zHmiC+@lwUB`C3MT1#H}Nd+aKcxhYJP~A@}b` z-HmBE;O>@B;E2B|WnUePuVo-+7@FEf+)?wh0Mwg6S~XLw1Rm;ZG=8@}G~J;L%QJz# z%eou_-`V~|m#~jd$~z3WZ&i@FeRmA$Kz(d2AGHF5bFXh6x-1a&j#?)X5;rIFlK_8z zeoeYo_BGHzm?bLaKo7N$qrZELvB2rAkJWGXe&KV#!_LpRs%HYRqg*V06XvkR2BSCX z!ZYT4O04XFxlw5nnvfZ2uw}tUT|p{(^``khOSOI^NZOC68008Ys;s`|QY}GMVFOg< zIi)Gm9`+l3irH;staSsL=LlBgT({lc<&ODmnio6}?L z1mv?|_<|`H11meBKOuaoG0evJe42mHezq1XoC?1?L^{To5f{`V&kn8 z13EMd63_uj`%eRu7WAj^_v_~pgcAU}x)7SzV14Ly3nS<~l-F#4sfR4WcAkX0c$g7H zi*PpZHp|9YN$^wsa#W;=hivDw6dNVEvcZ-fDUgY-TC zBgv;Mx@O_Rexm$W^rx`*rx%h4B7$=J16loA*|~S;PC{Cv^U|r0L_>1*2(0824KNi0 zV4{mdVrEd&VDDH#%J#uo_RA16UO~n{RJ(GWq%PWN!zrjlUP!ZwRlBs5e@Mko?j}a zO@SPTRMnpUf!@84fX zf3-T`&N6Nva3#=^V0{62KAE4mKB9E#H2fPn)9Qft|5|uWi!x;q`GP80dBr#2HZy7` zBm}t=EN{E%h+<${7M7(&4Rk+V8;Ouzb0WPLCQ0KpD1TUoXA|XHvrzM@vBjfTu5&|r zjX%$11E0?TwR*oYknaLFXj#p*c|_9UIHjlcK_@5Xp|k8Gpl}9P?gsVWVI4FkHKl}# zURa|{yq(mB&_*+jU$M;Z>96&?ftg#WWV8q$Oq6e-S{Z1W44T?y<9)3-sd5nUxpO{VcdRzP|BYj=AKfcdlfL-WS$aTouPp^hS`iHBIv1=qO^ zjSMh^?Ta?F9|=*S6zYA|O<`v;lX)8Y@z@{&FKeh^zLJ^jOfP=?B#%mS0{jd!7u*1< z^YWA1VEM1%bM=N;NeugQ1*qtI#Q#CplLs>0|NlHsPft;l>2%c7F+zAM#~h^#wKZ~d z$l_7CuMLr0omj$?vu5aWOs-IyTxCP5O^yhQHlds$-sJJ|I7{pAnuz2C3*>ptI~ zKkTl*ARwiU9IxsK628rl`64Kxl;x(~Y0Q(m{Xp*AC&HaT<^s8bDFo~D31{g6y^!Fd zp4Bj}yAulVqzqE+!t&AEl~Z!Jrw$t}&gNUo!-f*$>l#0pgRq|S{RI9{MLfOTW0Oh= zJ$(s}Mg9iVBZmuuHBU8bIC4;KcLs#=({)b(m*}jL>)ovpj2K@)36AnZ{^J7g|sVb3oe}vRy-)gjXBYVQ8 z%AayCu>n#Jxbhi&gvNZ;d8hQTj!Rc9K4TRw)^<&)GdsplS3QJc+LehY>MoaTtP5aP zJ`223i&!7CG(t7RGIyfch^ky)9m-h>yig*zxF#Pv$EB*oUjLCsRx57zFFQpo`MIz6 
zbPV)_*#i*O?D0Rdq|$C-C}fGFjNXP7#l^~_{at=)#F;Jp>jwj~2WbiZ^>0_={(Snf zH%q_GrL?NZG`T(c^7V^5-++b&>U+Sa+|Zr#d5<`;hdA9WJHHQ>hl1#|_W+M24gm{p z>wvgT=U0w_%72f@{%swisKVWA#8Y0Te*w&gp?+cf*#17wU!m+}*@|r{0n`#8MqCa% z*d4!Br4kfTEcxvEMl)hh#rjL*7M6GDs_(J6Tr%EZJY%ia(EMBDJk`WSWr6OuG$S;j z)xM;zE}gT;fRSxHy59g7zqNH>FaP2d(p3p{D~jjd`8kgS7*R`Rw(u@=5L`%KVw@j+ zCe(#@7_pYNX<~M=>t=n{1I5g_&tDMQhm02%O6wC<-)_x5PJP8?jMad{+fcw+4seGph-$aL9kxXhoNf8uQj>doF;h zy#gI*GnPj-C43OPs#UQQC5^7?D>ZP_$O3|J%h&05p_VP+9eg;Ssduo8FMYtSO0XVN zV@4Y}LaP+AM|zubHgCk`YlEk_7N#!%UL;ke2LeaxHJCm1*huXPsIxZpJ|jo^w}exO zmQ3KS3d~pBLTt9OI#$v?{X>jAxtS-dx4uqKjw*geHh=ut?BcL!{rcO%3GRV)!L1%3 zCg9#|d&9)Db*B>6P?}wUgDJE-wAyvw1J=J>$^Pc$-38isw<8Rg!9g@I5XWy{GLY!L z`TJLQxKv$=x&Rn{M$qO~vIk zH)3-^9fL0xYjEPl;glFL@~COgv1OcB`bl1OYBDZ}eHzlz;?VdWPtwA-s_gZqq7#x| zur%h*bD04BA`~_347PpR+_GrfYP=fAjEP9Q)i_dad2>`j^bz@5JjiY9x@c&E^w|7B zjI=(+DYGbS}ry;-~ z)Hh&{i47V>;Lid!z!;Qcq=?$|>&>{QAwg+=2Qb(8g7z3snJGJeO`Z5dg4}JLF91Dg zWkDw&q(&A?jHj|-rKBzTd?|;0-L4A(b}k`6p8YR)Ka9`W;2rGes0XZND(t%HkSIQS z?8=PZ0QAUb(HX(l!AvN>t##xhB5m^DY1`TG)T~y!Y0eY)z^6R@>YO}81S@>5GDdT z)X*1upBI%e10IRZ`H6Oa)}B^IdWe>~Q?@6Rjjt=ygp`&Ej3^ue#*5%% z18*g&)h8TJGxO~50pEgwk{D7GTivQxu*cj30k*lK+!sMPEF~Gkx}C<@p|K4r^6 zX%}xc(NMMr=Z2Vi#TvzmEEkJsk1sd5jVtc?hpHa1-mK~#u=~5hbwF@vWWICd?5xM~!DIbB)c)G$DmPZO zByP}%cD9uHbV6Kgna(;}pvRln;zZQtkgK|9gWbRr$WR5Hueo%b?DjC_7% zuN42-vsE33F)WsDTXekec-B^xrB6erRFBdM=jA9r0!59LWz=^oa(gxS>|jEJ|2u@I z&=Ush`Q($8h&Mm~@H5uRUv1I2nLg%^HezVPjB7`i4%e>J0~4%Q!rrk(BC}d%hf>?U zKX1P|-+TTjw6%sEMzYjwf5KJ!rEoUvHv3+b1eilt_g!cYeQV5 zuvO)1I-yez?{n%zx|P# zXF}_@MN?frPJh|_Og#kh)8r1&Qr=U(gT?2n@RXcnx(dI&@U%wzW?UO_8{_g`>?=ls8MJl?A1U-B^8YV~Wz-je|oZ7ES(ssO2j?ed21i*^aXo3`CA<;I7ec zeoDsF%T`FST!!e|O1m61TQfMY37hm0w`;W5K7V2UmrmBi6o&c*9>6^F9x37SkzEiC zUQ79Ymg;+v$Fw_hWFDvRwI;m=V&O#w%H_ z3D&1ds4v@UR5f!ey>uDXbJh%TWcf&)QKwE2{+U$M360M=-Uo>n&5qOR^y{wot_ z09S!OkX5-b^cFY-o+eT{inxdijgMVvS$-NM8{Ffk)Vi05o#99f@*T|Hi*ltRrxpb6 z`i_0uojih0mS+Ty5DLV}1I>?vEACW^qn32!swv_`LJ(x* z;!-CF;zybb%~v_FguAgJoN1hJ@CPqlrmt{ExNI!K&?AoWqv~|8OP=6{Cip(=2wdgNnzfA(ZzvF^ z-SL*S*rcfQ+uXA9t0mVhpub}KbE?+*vM~*eAqW3iG~4scyd;(`+%Uwl*JW66+g%DL z*lc~A4WHmD=(uo7EJMyr5TAFxig%xx{%e8Y6G%*ip+&dL?wbNvmbj;a+a$|)`cqk( zbCtwD(O1c|EPF&`b-o0d0eb(#wuQ^a{Z(n>XwKdWE0M5Q4ZN;NyI9cuP|Z6EiP=}F z{A45J{ID10B17(!;Q5nga_5w36j-|W3$$%+kAFV1t!WBk$VDW$4TGh)V-tJh6Ok`b_hsJ~;`+VN+g*ZQC z=}O~>A;btc&M)51|vhqT83D%Ty_h}^@FEKVpHM? 
zw#=PMql5Uj)f8JewNSp@fH`Rwy=U>3U)W^cmJnJ@*>#)Gzw24F4(Juqk@Jv9d7&%A zDIe%^&VnSk`}h&bEUZ13%g^`^F}^4!GT76&Qk?vv`GS}L?*7f=o3k+hG=Kct zHxyf0>I9k*PI*R`b6>otWLSINf{Yv+*%NC=s1qh^TtPTxykhPNY+Ty04~X(Zm_4Mj z7WCn1(HP2M6Xa<~7lQx89#^-1l*F(`H(7KQ2X0ShGy+3OvVo7Oo(^cYAJy3Oje8@;RUm~ zVY$F94(>j|KB&zcVQUU@?{N-=v8z-LlQo(UCof(b^+IC2;3s`mY8|Be0n;6ZGa$RL zpz^P5cc+!aQ7)LA<0FrqL&B+ipwuSng_CL5_WcK5gsyF1nZ*C9~ zxao9spxOzJp#Be%6N6GwnD>XKRrWpPF=TCwABuS}#H`by=b*irf`9~!7*84ykwm4s zk9Z|q^G5PcPs9mr<}ZsXVO3w}RmV_6LvyHQmy462ew#h2N63J6-{L5jAr+kM#`Ej6 z63z%%GosJ#V)(!VT8iIpu7$Xf`q`2lrzI0h853S6gDz3tcVQdHf*P6Q-_}rU@l+B} zS9kJBg6hiG5T1>F44L9kcf8WO4f(WYb3sK_9L;j*q5O=QH;59d=&W!GoRMlJK$&jY zSV}pNMoQ`z01kq9M%E3VPg*ZP;R!o-aq#Cyq%ye5vK@yw%s`8w{BhfI9YQ7+ad3XG z{8?DM(%fuGKyOWQy5Z#1xVxf(iQVw%hcj2+)YKR|UwgO%d9}dJ;%2m~ks|I*)I-?# znu`d1lg4|E%1^MnHyH)Y82zkm=+)TS<)G+t>a>*3E5bv-PoJ?Fvgos)T@)8|obw>I zdG@MB8%%TOzOI75I3AG%Jp0d%xC*hfmO|1BBAPDXJ2Cb|!&QEoWW!ZLwAjt0i$owK4FP1#N3gwV2d+S*7p?}O^L?JzQh1t#pn z63`!njs(#8DB~=2Ey^!Lj!rRctqn7;fR#==Fs~~JYvU;Ofn_Cg%Jq8x)L61r!i-x& z`To`G%+#*h2JjS_+dPIPTX5b~+oMMMJAFO0eL`l2#&ZCGxjYG`J)(X1_qN4Hm!1Vg znaFlO*fx}>r3)OjM_pP3?HGy-@fgRVOQtJK$5C#IrS%vv%k9h-yg2+fM3JYrL8;7m zO;AWIqQQV`u|LC31>@f41)DviD!XXJ&SQgcHzHr}U3=e3I-uuj={`+mS!U!YvJ7d?|i% z)30VW<(zlXS)ZlidRmD^=jT+lKaKYwV=3v6NSXCg4!|a4HuTuFZ4s*isI$3k(ct0~ zGViq1$aa%qB>^3ccU}y_vvzFZpnwGXH+12+BWyaTsIrflh zN0v6u-Qa9LS5@i^n*2k^v{*%4`*s})zZc*Mf*=a*N~{v2j1kgu5yCz>uvP;{K7AW? zb^MSotMTVJIb%%J-zW|i|4kWJz)>C}F=>a=>FPB$I3l6N&@5gpVRAubnXrp>Ov5nb zrJCdwBn^whw4UOpd-Xpd;vTC~Zt zOp4i?FqV`*p?YX`#&3p^tjCJ<_=eT`=gqRmgV-#w-q%2RTm{M_+of&z(Mj|)3zE|`O7xn$x~Cu-(Zf^D}o`~+yk z{2jZg)xwW{{xMF_)^4gEi$i>5Q-QCs`40F%jvpfuNwgM<)#&m~zGAYjLpDU?Cd$6f zB=v+bVjA@^UETOjrNq3{rPJu$Sv3949F&c)q)vrgr>AOl19x!`i1`-W`mX3fWn6AW z2yUNcM+UZ_S*=?)Ip_VE5gA9B1#Utz{fD{8AE}k{fp$`dcG6t5(<9o<348)w-q}tY z!%f=37lLlp%}W7IC9y;>76Be(`@+}a9|e5 z(yYY&dUa3vLP%Cd=ty;~h}3es`p2|pz4MEx)@{`Ghy;n|$&6}{ILas0;6OOKh#;Qw zhwy&W(#}8~Ty)9UJPCdoH~QdWU( z2ONsG3K8E^j_NCB90Z9{J`$Y-fNO4GOEBtqE8FUIpc9D2kAj4NhSapHI`?b*uwngX#meY*O(4jVU2{-3_s(1(?e!Eg!mR= zzq^bpms0BYATGH>BvvS&HB(%)3f2xAe<869;)8HNmXPf~)a)0NPwNrv)GnolFWLmr z1hkC2Z9aYDVv)>#oJ5>#;V^gR@#rUjcO@$n8lg{{2x*DK3 zE|*8MwZFYsBX-|HV(6L1(OJ+=R0Z-coXLn~8k*SvRhVh~2sWN@4Okap#Y9wcgbxI4 z%7PliM+CBx#LoIPM*ISOsgk9`BUU!-)shm)IrgXn*jkO@$lFQSY~*>HX_4>bEK;SP)^^U~j~$xuj&nH$9DL(1m%DdVTk9fF z>kyiv=T>MCZMP&NxbPCh3M%jih{#kQZg4faAIjPHGd8sf`5s zRvqT$B*fBk!8{KGX^PJe)Aosz!cFkApGbC|Pxhc;a zF06_7z~=0% zQ4qCb-Dd;A=U!`rU zdZJa(JYCaad}GRvRl#C*lQ3+iETlNaE$a)YYm?C5Lq6okXSfP({z}%~bH}S4@hjmf z?X~8RK6gO3hEv#ZmG9zTQ=9|^2klU$H4ygC8+bNNKe_2M@E?F`culC3&F*u#rpbPf z`k*aI-2r9yxVGa0x_9 zP@Ve@AX@1Ims)fSC>^u@%D5u>KyJzOm*HnRKV01u@XEMVK_>VolDVm)1{fA>z5n{} zrs|Ob*zGFpSb4MG|3%Pwa#3K8w0FBv_rS+?mRHZZ=pV3nNU0Cm$YiJbhA3$zgoY!& zn?4izHK{`;7)|L~;@$!o` z&p~+Ix_O|z6%2Wx+E~R3<8c)9tYL&AZm3)R9rFs%rQ#>+OR2F5XpkuJA^j#O6d)gA z7BpF2+J3RM-~B{?3T-W2YYx|fKBfX60Lhiv>7kO=_OhTyfr-}`O--kq4?e|cW1u|Q zEm(5e8_yOOqo(rbg=bd}bYh)q`xsO8&y7n%Zf~qcg^cXP15TL;`=a!KOMH*pjSgD;xX`qI!a(V7bc(#GjGkgZO!lWb+(M^%Ih zQW>OI!50ii<<)*9_7@Dkpr*WajjWog2n_4~ZzA6Ulp}0#ccl)8D4k?)9ej;ZG!W!` zRYB~Rb8v022g!4-?!Sgd=J}vH%RUO{#W9Q25#N$T_ z*8)#Z=5BBU3)EyD7g6If)ITc-C@HCvcxgT2=*KZ_T&Ix{lz*Yt-Pd4y)X3q0FJjIu ziSTNA0F0?#T*IN(UK)w^>?oWbzh{-^6Y3CZ3@S=T`m-+?2wV-UPV24Ee}11z;t)jq zAq_pMwSSl)cX8Xmo@7LlAS+!#GRLB3W()QvN?Z4xf@h<99GLwpTUdGV6xy<%H2Da3 z!CT&kk<09T2Z6P2RE13B88~CmTPX=WY``RhLuptijlOQ8-C{3Fw0-OIzwNHpIzY)Ssn_E5_qY@Ogbn^?*xyJMvx=6KH=6`*Vs zO{T&1XqQ8xHwh>}u_v^C<0|TZ{;M|5uA25y#kDm}j{0*nA5ltbPIBEzorIOrxL#SK zU}0W!iyrqh3kg|SYWRw49WFxd!6~p>)~$qEy;}H;MDOO!rjUe!RE^O-bOfjoipv`c 
z;wcl>lxSSEWnz;Fv@GqK+~;9;=R|abIJt%OMNo}EGI2vv+MF$wg2a!MenZ+v+kz@K zwHhXTWIwsNeN7q%M^=)p;xVA+st_N-AfA%5n86cAoA#?T~1B0p28>gAY)OrfXQmdV_xq6tpIj4-IQ(= zay$lwY>`RxsS^yjdIKi-r65-mOu;X;F3#AuBr_7O)VyzLk6L94sJc%BihD%iDxWK# z5YeE_%wS!(C7>Hp&G92=U0DVozxWvm*XOX97k)+2 z45(z|wz&mRt2YR64|>ShW%=aaS-r7=b)M{ygy~TRZlSM(111!VhR}@p7>_$9M z`{BCtLSAS(_wq6f>%S1WKY`3stocx$IGV8VX*_O>*C|vL>N~#{(o8`S6wfKP*6^z2E zVC^3vRvgPL>Qt`DUbi_PIEL!m)UokY|t?`3ZxcY=h1G0$ItUgCM>X>;-c-I?0*waxxy33R6Hos zes$^bqTCcuGZNB7k)IU*lY%qmYAt{{T(c7=;dADTi{eF!t$3QL;KEKM6Q^mVx_z!% zbPGrNfRgP!vz-S~b|G&90dJ3OJXmN=~uQ4LqYXH$1_DQz`^cxplVBr%9}7UHDV$~g-A>h(2nk$ zz~b?{{@Nprx!${fD0QHS8!TLl2=3D4 z6;=4{ZMjIMcm5|?pEPfIjdYs&u_(FmT63DqfF<@%I==~!->V4Rr>^x=C-Ba9X|VXP zU4Q+P5OO{JH~M*A1xa{WfIkYTsm{j^dG0h&ll!@kTSpbvyo$JvionP#iIlDKY2_e1 zI0{-yHk@y%=^DZWbIPuZm$%M5FZ(#S>3b3)meOBp z*~c6thPpkL;(vQrmi>so$(=?*JiipgbBUKI;Xbh_&c09E%i}q9FOb6hFMb8}=}A#u z0g>O^a|Zu5sSd7kj0)-o@>XV_9h`cbi!sdjXQuyfug%9NZV_4K3EC5eb~b^C22`wS2{37#tNqBBf@Nmx zHC}TT&%l8MYl+u(D~%d=(u&Dt2L4`7uc`GA@^;{~Hl9Dj=$eT(wNP5h>r3LH1dYYF zewp!~?DH+D#QY1f1`;z$bGqe1Q&+*|wA=gw*#o#r_r*)?9+Wk-j&J@R?mtRoTuK7! zdfdvs={<8Mw^x?Q8^N<}*(l7(%Dch&z90kp%O0w-_I{2BYX&bRd^@ty^P{^)YlmG> zn>$9?t;lyLu4-mNlvU{RnlgL zuuNUAg64>rU1>9JMOx!!{r=D0FMcF$LNVQ%&R#u~ughy(P+c!$!Jgiyt^R|nvL2!^ zR+V?{^6!D{@nWPx373GcPq)*(AK(7=gmtdU@EgAu!23HTqB)7&8qlr~8J}=gv{Ni# zgZH3!!rRR+bm5Hdu?L(SRRogxA4zuIfZvx@(#DV}1J zKj8o3Q6yDlebw9Z7;J6)1{1vVJp}r}BNE+i59A*%-XuwfF;*8CJka&&j)$o(5@RhF z0`vLU=RHN0rL-~w{Soe)R*iF_P+A=2-R)iBc6&h5cQFspQB~jcuFSp|>1yU}voi$N z)IX+Kx#Ih%$)V+nIAzHcg=Z2?4jKj_zfx0DUSp+u&`AFgErl6F^_6q^WJ1&NojLIA zsl7)>ZJ*3vOiu8wIXaq~Im@PK(G!J8Xdb&Vv$E}Au8bC|K=}Xyg*BvgUvLrY0 zfaf#b%$t{=#B_YipV9d#*OKfF`KIU8Dm2)Hca_?Ak-wnxb|PqHX!x7TYG|vsUeSrg zxq+<9%|0u$JlaxgIZmmjDjrcpeYIE*5V|8gH?>Lnh<-gHRtE^FIe`5Z-`yL?W{q!y zTbVtl-PFCb8B(_rgmY=?pbh|PkiSsjN0r-z&vt)~2mKI<39P(3nGaPVe>tpi&_ihx zXkb1~#L}k?m^BZhJT>(BO+@qknoQ6F5{GDyG!pv&Wt;E{I8U;}*|*oCF8AH`3n5TrbR1kq>_!9rg)__|Ldahd#qFT z7$5(C|J#qkys>2ei>jC}KFoJXkFSoU5JhIOlYIoxGia$eZ@Bq(BbMU#hn}`N8&W{3 zZ-%tnCe{GAWxg|G0TbgfK!GX;f?V+BRsYY5bXw=JDOmguP%OssC7dkm$)@LxfgGZd^r&lLCi?SIBnD3IWnZIJH~ z=Aisz=x|ZP1Rg|kxio#p(_z-Rvr7Xa3m5FxN0RsS?uxlv85oDEU1Ol10N!wxa$#Z-yAUp;AYrkY@9!y#Y0IR)~v zQ8+fk`8c5Sd~5s#bXVj-bc5QeFlZ)c285P!9kBXKW1 z5m}B`vr=sQ$Ey6Y6qO@4%IZ+Mw-iK!e6BFB`#DzNWAwJG&WV(|?8hBrnf6@nQbk-L z%-8A=$T2QJYG&ARmL`V~v&39a@o*^$U`tl%?BvKx7!s3x*qcIxt5C1!^6q7JNb)#U zPK$m9)0slha+vfrCSKufuC5{p%BBJ;g>+w}8M!CtW*k~F>N2WxB$XWL$G&lcdq)xu zQuhL4kyb31esv?!AjEvd%BsPvtIyO#plJn2_SeNyFpg*0dBvc8ge$g*Y(nm&Ev}`2 z!ldE{-1XHQh1OcLJ#a(RxW#R76YB$ zX~WmZ3hDT7Dtm};lvTVDEP3Uah8Pjv=o$IX7|K^D;UUN$ZyX=Cyp>5C*z&bdn^8svY{y8EAIZ>QtYu-OzliWwVAyt`y z-3*qA2rLtiH;jf#KRl)OrD?l*@>b%G)mi0PbNJ{r95PFffL^{r*m_QOF(;qac_}7> z&vvjZ1s3CG#K2$C*=ybUQL!*!jv$e(G}p@c{rOKw>+&zf{%KoY5|+4A`o@d?@+^N+ z6ohlg&~1pN=$ZfzhGWT$+|a`l_w`|3EB z%g`@^p0bW%4!O;q^L%2JLE}+D6DT1ROdFF5i)S4O<^vIZD!Oy{#m(KPm~z?ci@Q>g zVm$JmH)rP^hG!q+T5AjSZ{42jWEtkh&S@f?b~_%NI{}=MTsV!;!Ok9;K8SBn_F^8w za1^F=L6tz(GZSyKc~=Mbs5q zL`PwU4^0R2tajot0%r5NvNyc?0d(=ZmiVbXmh9OK+C6apHQf7eLT7_vHr5wTm4UO$ z51&wE3x;i!1HFyvs!8(}H_svOB1K(X-V(}0ahL<>fg`3NsHQJa?TDw9?m>fo-}1{@ z7s8;r1rOmXAva(%YYShMe|i6!f))y|tp|pSu z@}D;#|9|Hlv0y~XR2^z^;(@!1Fc`1a6%)prRRtYcRRIzPg+P@=mC)dy_M)Q;seIcf znd+P6t~F)(XPOr;G@+YSX}M(kGF^0Bvj?xV_G?a^owF^f8>fW_?YImj@pDMDPSAzH z6U#px-kc!sFuKWeouN>x&-3y}7oCBcCK!2sxM5O*9J6KzPDHAaodZw|=C*jhzvPT; zB0I0s<|esAQhWyyA#|$a3NXx|jH3>;0?bV55RGEJHiAUwYXTBemy@)dZ!Q7}+G5$3 zU05eB%DYNBFoZMY-Y_OEU)rq}?3>@p?Vn&%&rAbt1Q4Y*p*bHE4832Y7d-hNn4~$! 
zrGJ|?<5_m1Dtf>_%jpnaM|?0^$12cU0h=5Evd9P}ysXLrv-|V7+w+9<@$(DtYbXA% zjmjjNP8EJ~3))*|vjcRGj23}@*S4LYrA-l+o%xs+3wm66_ok>{1Vx2RRs)x!bjzy3 zO^5ourz}tSDd13Hz(wfYfzt=>D37R;gz{4oBAsftc~tf7M-;VcYLGq6t;N$+loRHP zhX>n#hUf*^xlN4cv~LTvDlH{Ux1d6thyB*tD)<)7f-l$D+( z>+{x2_&c*ZLq-bal#KxozY~jwyk}3SdK_981F|*TtEL|O=4$+4Ww#VB{u*h)0l^o`?!S9m)%!z6Ht>H|hZ6@J)n9XLugmjK6XTwg z%E(V=Fu;^p_~Vs8`z1Fxn(oRfeF|J-K0hukeXG1P5Hu1f;;J3Z*s!29S)P}6NY1An z^8Ob>^8b*hHe6K>JZUuSD<(m>23qxa#Cntlx%Trhv>Om*k0>1sCG`SODNv-~PpO<3 z(}ZG#`5;sCFkD4z6oskfMVJ!~^mpG@EZWBhk19F_kQRTI{l=RcATpLY+Zpe-r7sLa zfWE{(keDdo5`?MXZt~>3`@H(5rqwpW%gotPW@WUL*?m4-$7FNH(2JEaz}@C&;()Pw z2#Y8Dh&H3DrSJs-^wbwesS2-xeXk`Q1;Psaq3TrO(4)OyB!4x0YWU0c_5Ljz*e7*@xV(JnLNUoTM@#V`b89G0@UK6 zc$i`6_=nzyBoH$3`7fO&Hl&QhLz&+|1Eb&(sd6M{*gatdM<;ZjSBo&V%Pj}-(?TvG z_-8yt4MY(6&_wz7CUdBW#39<1u31+4fwd7DLc5#bQOEj$(QEB0=h#4tXI)b|FJ7*F zFZ86qA#-+JREI-GS!yL{8H9w?-jy(jFQ^w91D*sP6bcz9zTT9*d-{1H*VdoBf3L?C zKdOAdOhsT;qSvfA+jhjvO>hOSOV> zXQ1i@Y+SMWMbPTZIru%{+@N<@T>(G+16jw(25rc7vOqLzu%}U&)b?(14%(MxIDTkN zRlhB?HRu=|zKAeJr|?KAZdCuAT;gfFEgNJ5E_iq&^uSanmo3DPQc3Pj|Jl{o3a+xpqxM1YRTDF18DyI6ba*xw;|N0O zOPpH3e7*y6cSeD&B>@gk^BDFpl? zA{6yKAunwqZ3HMI0UCGx)kZ-pH%B3@rvJ1;gxakXT9!Qsg8o<4ZLDb|rmNjCgyY{_ ze?Og6bLIUs6bH=fBuw`l$r5DTz*=3kwS0;FlRiTb-JDkz>^fBtDo$TA z!i#sCv4`hO_i)ufBcc24)bmDN;UV->Ltj8Qr=jO9FEBg~C85%uc6CZhsYx3O@+FCO zGxb`@XysQal}3-J>@I8Mx({hDx~8`Dt25>^N06fob}Ki{ zpwb-_LF+MRH&KVu05kn)!QnIx29MtY+r?21^0$*G%cZ`Et$)n*dHIi&bX&4_V<7R` zMf8wmWRXDAH%C2IKe?CVIFRWJz8fw`Yio9$dS*qW5^iGSc`!-%v|Wd5tc(oMPS<-+ z>Lv)zk73ea>;D5r$_PEmr=v=E)l<{ROizt_f-^`)aFxH8>M)PtmB#(jxPCo&6{7C* z3-H2(r665=0MTl|2!p3%5C8hR9Xvqk_hx?eMu91~Ptd~#U8V)pLKNoJJdAhcdSdF& zEqb>AL6qs{7Q7RDA!LJYBW#Bh9#mC1$9stls#MZWldXXLSsR5aIXVL zBufd3vTmTaf}d7~v;pW-O7#j!Xix*wWgMfghfWkFzZ0G_H)rdby`Z8U2`3OOoMdf$ z>d6pGwo8xTfIJxFQYe)e&vqn>HPv$N;;;5hkqNUm6MW zB%u2RRx$EvwuO+Zu>`SJgPo*C=lKy;StKTFoq9r#0G^3H__q&iluPx}3_|=G)N-%| z9F)XEoeOm?Qi8LUinvXrTGjwztQ9Nk3R%Ib#tXBj&2x6zYM=#p5Y&Aq%$q}}KK#w| z@9fV+&qVj{&A-vCCOF9-h+;Ag)4D(=Tkk$?t?liN>Y==w`!@Lpku$K<&1!>d}|Cln^!T0@#^4f9iNulh9=EMzBv*#s~Yn)hHI{guZP!b0aW>nZkPTjrC? zkcopBn?#+7qiC18s&G}E$&ww|fdBFz2;MthyxW95%n@xcT_KG-B?kIfIhlt&4y%`D zufl2&1(2W3VZ?DT8nf~IWwBHqkIy=LDobpmH2)`rD1=XjD=i$JU8kBL#x<__BAPeU zrG<1ocweHS7h4jX&%H{8%?U)KivvIB&t>RB)2Gz4D9p2Vjr9;xWW=*xUA*7ClACzp zr6PcMA}hpwYOiq3)p5l6o7WvG1>0jFjL47*C7NaoxgPxnVmlgfqf1k;Zw%lTp>sk9 zW6f5a<$5`eHNG#lV?60Xjj~`45)8Q8f*~ipDpD)!_%bs`!24>Q(}9ZAK}+ZnTp_#aKCdjz{|!jZtkiS3aEa@ z8z;VACqZnS;ZeG|9yYm@bkz>Kr~I^>3Ivu7x{gwf#=? zGwAa|0`?qq3dF;_6x>#&DZlf6yiO}w_3ZF9T`SQ}_>%AMgF1%$-)`0h-CvF&@_iJvgwUdbW)fi?xK%BCMJ40aDh#|ko$)v7da^1d(4 zcNFbE%El9Jd#c&QF9JJN{meGhT^dh%yA-q52i zVyRD?mGJZ!)Sv-(C|dRPNUY~3l`+2;mnVBGWK|fhK_)S+H~XwQtl51D7l6^Nm?HoO z&7C_9XQ%aSvURmEmxEU-C|YOrR=+9&Zz8a~W1gc#NYKlK?FpD1bldGbH-k3A|zpAM10tDnO9 zC%_3m^#;SP6UA(PkJWZ0;;=oSzje_hWwIsP!^3n1Z^=`wQBc1Vf<96eH`fJ{xNm7! 
z*Uc&U;ZcDr8Q94q9ibJDxxm+01XsdbLzcg+c)?F<#o;`hhZo{KXYDJ;`d?;G1MC2J zXW6@Tm)ju@WjGUOLe!=y*UL{f-WG=XE+7_^~LExX7e$u z6iA9JMw10Q+jWHC~ZH zgHNAZGsgG+Cbmt;d{GD>ov?E@3%o9&Wr`koe`TFHyR>ZM(t(UEf+Hc|0U}`Z9ao3V z#TL?RwFz%FFx;ISH=+Lf6mmNMRRbBk#=D5_=kP@{3a9XIhyS_Fnof=CDqOCdPJ5=I zdt=eSDzhq^hqp+Q-dRq{=s-w!)L zdf?k$d*%rK5Dq$et$>eywk*&HP!+1KAwxDW4kk%vikSRlQIq(?a*UaLT@Jot409r z;Cm18lC9@WIR@1vS32qutJXB$mA|BNpY{NesFp3{|4Wyew}d*E{j^rZB`G|4YW$rz zD`F4rJg|7@WK(vM4o2Ll6m(sMj1WP0U!Nhdd8Mv+8hE8hC}CCBi9<;MqkMtb3X@WxOlDu;m}(7Ou0s zkfrjUxC`Fc7?%NM_bq|c%eDndI5WMJcU_inHx*&dV!gGGp)^5nsTmT$@cL#myFG7q z5S_UJ{=5(G_H$SP4xV(WCKAD%an9{1LgA{-C%}^|zuZajtXy_KyUdl=xdTUv{Jy$T z>5&UQVQQjR?C<=EllX*1!=69cia0Ixa1?l-2(oMY0AYzCD1QVH%gtrO2Wlo!O!E*_=NFI`@UtyY z#^peZ-40>{xw$-k+s$TccChNR?0@RqhU9fHmq7}34s|-`wg+K?Oe<**{U^a4NRlw$ zOlFqS&`@}5h>W9rx0J9O`FksNfV-hU_QqKiXApw=a$z3_099P1<`NFHO2RbV_bhv~ z)nO=0JRZZ28kk)NXmAnVdkH=;YyVsKVqR%VYxEVPBZ))Tw;K(BKwat4-AYi_%pp7L zNpI-hwTQEt>_5R7%ut}x6vmU5_r{ulb_-_ijEjTx^;N%s`$Prj%_;jHXYY4ENG;JH z{N-?aI-F6D-cQ0$96umeXlCdvnj zesmXN{H}Zyvk%yd(qowZisWxtN4UytsR5wnLBbU>ZExF)As4F;o1dEB5d^^hG9IXl z*3c)#KRqF*|IRtk-_u1$F-O|<&XKkXx|1NG1K_aJ!Tgc-5@PD>rtEC8yG{bRpBi~O zcxXpb`18H=koDvnm{^+-E~Nw+Q4$ z@N}d$28jaP^qSxd~@=3_E{%-%@3B`n|BmbUnj9&9afD=1c+GLS%jw`6Dg zIGts;m5%<7S}PnTVaS!R?j{xtJgX#|2yO^w0n&8>$V9sN(Dap*Tbx4Fyhh5lw84*n z2M%!&SR|&YfQEM46!_9qUvO&S5v(aIk2dj1CCUuEW7ijJa!~T3+C_@xlV<;*C$y!4 zBLL9S9NL5%%@-G#8bw3ZHlXgo*87#c){(1B$0p~R4+&BjR4PIH@02Y&1LUKmFJt9& zh)4|PVbSR@*tqLSit*`;pTeYba-)5)@E;6EtTG~%YpiWlT5^)@L`IytIR!8#th-cv z@Sd~X&nwweK8PGx`~?wFAY}Wak5}}>A$+d)?)#Sn6CBbq!9#c`{kAi$3%m{&Yx0-| zXFjtEOUS}*&NUYi1lr#VKXvLpO}+iq@{gejonX==_talcXfvm>D(IS&fhBQ@TLd@= z(s|VQXR(O6RtB~QcsUTrin7N?K_A4TjX`&ZGLGJkT9YTCNd?K8e{*h66JqEEa~U$n zM>Da#))_)mzvk@56YOi+Ocy*V|3>iWn;j->_TZHDO;71!CFW$jB3u9p;4&A{^f7YT z6BYpnWEpfQKj~-t8z@?>TYsF@SZ1yuc*ArWC=z^kFPgT`b;Pe|4fO;e4*+~yksRs9 zO#I|;dqgs@PpmT@3)2R?<2wjiNWON78_j?>)@-3Fu+OaOgak$;^M5QKj-xzp!mj{H zNURhH6P1=&ux}MmK8unkQ;>pFgWypj&=~feObgId!99>=Sg>Kr5`d|%z_X8pg{A+G zop-y_3g-@4zMw5_d-bINq$5jqdpOsLeheZnt3FpKpHZF_K&lg_{o7OQ$~hSgCEP1% zoLt(`5b%@XcuI`lTpT4+Staj({lbF1sq9!Pn20M0;kep4D=**^d}9W`KgMkk(zpa; z72KZ9iBf0SrPA?~@8N(FDQ6x6V|t-+`@jQ`&1!t?vFH~yDu8GF;R6%B=5L_Mv}2x7 z+-kKEcrA6Gg4Au17HZ2RI8Mq!z3=S_&i5vwjZzNDSp&iJSQ2w=b`7%YM%q)KWR016xQh2b)kHst;jJ}Q+VtGDJ~2gi0V}~2eN3<9Q1ZgY2mIwow~rZaE0B?ul~b{6~wsRMgc6Egg5@(b3DGPdP8;n>|A(BT(u#sa)UKJUH364#YI7AV1o0hZqkmcX?y)-)gGx>ux@19nn4o1rTc@p` zZet$5sO)N0svOw-H_v?(uCuMIGgL9y#i&bHaPRoNu5LfLZH4c+igB6kl$XlXQC&CM zBYFNOh{BkLfQ(z2xVJdxuIzGZh(%mJW0nmw$a|l8EC9L#y0?CU9(+^}v7=B6u{OOYWQH07i+@rmf{vluYhm#RL(8GB&?E_5h8)qR zyLMjRA8OIS%17ppGB`ZB7Aopvn1yj2bsg>niy|I7fy>QkT@T&WFOlE^yI$!el0aQ~ zM`#>BAk_v}rr-<<+ANCzIL807ya(?07(`=i|Jy_Lpc!QL<)KU;uDqkduN&*+mf;Wi z4Ufsx+74G3Psq&@nP%+;9iGO317pIx6Q4)Jp~3$w^{4WaPqH3u88(S$?a?T6V`#R{ zoOTCRO&KUI!u;>VC2Q*hliAV_>v*SZa}xS$ zP2ONKqmP$~Er`>t2HeHQA$e86#{5dMya^e>3S>|7u~R|$R;4ACw@4vyt0(Xf+~i#y zzG{;G1=wJ3_SNEM3)^UJBX^aOZzloy#E7?#xuq9ae6a6mbGTKW?y~EhOYs z+@P&%sNP6eqGz{tRPu1Yhe6HPotH>|E|vG*BAQpc9BE_T>%0giK5A}cFO7B!ih`2_ z9S|6)NdwX7Gp*P5Ll+}Z=_2*cd#o2m}!W@}%BYq!J3n$$xkD%)eP zIPNZh$Fw(2ILT$vhCFJ@tR&tqI!KlpqANOiz(nPzSkb~^iq4siJ=qZwcJ*4ls-PwU_qYks{8B*b`|rQg=1bh?q#fIe?#OHaoHhPu?b9&rh)y zq2i`}X~3UYOX}a(1PIF?OO+{AxN0=Yx|VuDl@&kvpdGHEaSUV^WeXb10(6e9k^N)XcO)!3c~^&*ye#&y4*w|t5}Gi(wRK+Cu%k8uk=DjS zsf>OrM6DbGyj^s&d0_|m17ncuJd+7POEW2 z=i;^V%>FujQ@UX(tlwNOUBIjqtct$#!d%_OfHzh-{3JmV%}QkzakepZ+fT0WK;elH z>~NDsy#MB$9U(1G+f*l*RMQ;np6R+?1qb{e26&tN9g5A*gRCwZUMs1tXB9tn{(8cV zF33C&j(8^kd>tvujh8*0QG4v%ANOZ9N_5JpPl zyYngnUvdR>gG>yYEzj5yTvoFP2+d~3=JkK6QPJ)Ul>AXoNApL$aRkjcH&>d~Doa{r 
zrwySoY@p5tz{`jy?`kFIpO2oObZ&a?n2enQ2n$*pl!kG~np=;%!MXjH4BK^A?!bT3 zncePdRCDay)q5El8;{NWg!1`pM^`|Ks_yQ@aY!FebbfZ?O%GQf`yh>>BhPkf5-bT* zZXfYMO^d+r(tbS5h?#YDU>tKa{uwt0RM_~J1_6F~OAf`K4LPt0awL8ZIW~w=rP9Tn zKeU^AEIk%FGtFxl5{{Ldev>EV7Ed_KL4Kie{f`U3+`fgjH$)6K-Aq(w#7du#)<`}j zZ$D#nTNY;PalXxTWRKA3mmNQ}Fa0BYR z^l|Dh$|}#BH$XYGvZOB-a)boKly`)N$uDHh@TcYW#gC4gMS5 z^t;!P__$Z=2&^4TmXEmtrgyR45hDjYd1UBp-K>~YZ0;xlClKc?;BYG#F1va$>RyMx_iIRNJm_~_&-po!xCXtZD1rRlqL?4SJp=qqw@ zzgq)xJTTTQe_W2+{Vt*dV%6Ih(HoGVMo${++PQI$eafPbZDl8(DBgi5FFQvKDgPPY zFXzVbop{0IO&m*v$CHY7XF#vLs*sb=q4HQ(mRzumCJNTkQ0X)s1d(TjXZYLdjU&It zK53ed6$X#AdcEGcxX0gu7OC7GSYGy(rkumOaQPEQAILH;ekQH{QV75 zmDd8sac*O~41zkH3-qpLDYqX>gS(vC05SejJ97n1$~O5}i7#rk9EmFIG};KTmhtCpK1#(LN2pyX>yG9ym?L?&l-md1 zd4@t?gdoO!W~{7 zxN4SIA@O|c(>T{p`Z#MuC1D$fvUxe;P@-(Zl-rQ^H4`4$84F6`-Hp*66YhYY=o)^Z z9Tjuhf9b`TukUXhi73z2W&Y)C!RI_uR{2@;59s@Lf}=h95Yw01?C%adh39c5nnbw$ zAnx07?jP%RgvIM}hJ%V2UFNS4!)DxXpQG2YnE987@1G96U@h1ZpS!96({_g!Ktt1R zlX9Qc?VM;1p%dhbSA)v!f^(_|8|-Jr!L<=|ns2e$pQ6|haa*^S=jPBr0V~Z8FVGs& zhqoTUN?Tbb%mPs2%ZXD}YNY7?urX9D+}xX8NS`en?G2%Au@sSme=0YLlx?C{Im4qm zV+Gtelff}%0@ zj^8KlvcrRnc67OWo|InNmMFL*iRCgy0}@e>8T9=0AErCuP4q$Wu@SLEf*n{eTTSkM+J+*|-*- zy%<)@omp2{nvcn#5;`oBHz(lE&%R&xatwnwrrkvvE{5@M8qd9HGo;UTjB9Itp?{$9dOcAlUS;A-q4=$1ERcdZBN z{gQoV1=ucl1(EN5bFzD|s%mkHd5mF8+l4T#y{VH%%T66l+U?_DRs;B~T|%{}ObM`wT8S9$~~5{b%hK zn%{|c(0fv8phKIjeBkMMXKRxQ=O$=>sFVV3Ap~fS_y%UCOX$pcmh`o6Q+Dbk zvy_c_H!V(ytWMnlz^K&mzN{W99{~R z(}^^6(ne;N-K7 zOxtD1cek6CIN47<3&qFoz(Q!1Ssc7RbmK00T5t?JJEj}qw#)BH@%3{dJcIs_y2zj8 z)Q#&Q2Q$UEkriy(Rzc@IY{1O!@+^e_oxZt|*E+^4yJLqn2%unMJ3PG{^9J{~bT_{k z4{^sb3NZ)Y<|VOifr~c|&4YE;;pU?#l66-X~7c zw0uLumjA4$9SI2?Kbqcil&pwgHBU zb46L=kc=%f!0XLalD7Ti!S?^G5cg4<#OJixGnXOf=DM=6e;U7Zg=+kvO?GaHL?8AR zmSbPS?S^Zq5lLa0Hm461uAa1Q$gx%UUq(f%wIh)=IUUEkWp9eoZBCE34gDm=Jnlub zc9y<1?BxdEoL1hBV+v~^D@sHf-WBWz*oKM9VzUEby-&`5LP3o)eVt9HRA(3d56#1; zwCRJ#cGzZqJ&7uCaSBYpU&@!#u(`&u^?%(v9Q@Lx^U}gkK-Mv4!z~uTNv^cUmr$+a zj8z0yhtX40e75WmUIbN=o91gRKq}!9hFdpn&2sfF;#n({DZ_@}`h*s{W7SNZb<3t2 z2cGl&;&f;+1*(`}CV4n3-h)3?a_63Sg;@2;; zjvdrN;;*IdN($?np_n@brB5iUyj$Y76cNLFwuE$kt1L-?3}mK@8Zb^!$-d7loi%xTrEBJ zob&KGp`gTH!y1{IbD(-JXi9i}H$vxUxLN*>+X`P^_H$g{<``Zt^St_FucGdz9qy6b zW53uS@wEev9X8b~?ar$Ook=pqW49XCn{2e)v?4NA9dbf99>Z0}Sr#oUH6QF;V$Cy& zt-C?H(eh@Jg=r}yn~@eNXEu*#>#k3@$Td`c5{&9ujYyXN2{#b(XY;CT-6@1)cT)L{ zMlZywnha{#7Aj4qr1fe8P1hx>>*TdYy_Y*Ne3UdFsB~*hB>E*)y=wza$tA1RW=B+( zaDnTx{U3JwVdHyLsk8x>^o3~oMJ#&zgQmrRFi2T-eOLZsWH!HCj0o$ zGfVWk6L>oI%pP_9wQxIQ`-KDY9w?_4#znx5fz86!JRKJ{6u|EW!JSSrObX6opt1|+GXir++ zd_?2>3Sy~@XxIOG9X`Eawq(HeZO;bvx08=6f>rxD7MBT(y`LY(a{8~?^>x_3Jx)YT zkn#kQq=RBr=#HvVhR>?trY2#HbRzzMPT7mCREnvapApOBucb zlNpC;II)OmW37dRJbw=-Tf61peG(5>a&4r#WOYu#NN*hs1acndDCS*6ERA#)$v2pn zug0v+OlBD3>kh;pAm9j>%BL4P_I=Tj)=lOA@f=KF`yJ7V>{=P2L{|A>U47ZyfCh!@ zN&g}X``8r9@O{{nVVH?K5rtqpmlFK_VA=p{dBb9@@KI^PqXw?jxMW2U?kqME?aEUz zXXY$GGLx>ivX+}x$xa6zYf%bbD7--8X}73%PUfzEyk{EfhnFi$pcl(jMOKbR6 z9Y-*kQ8;R*L}tG}ggcs9NIeq=Z#Cca9D1OO<(P!Y2sbXig>36|_m#}8s&@5JMat4V z^JVV#LHFqt^>B==?uhAokpNyc8KyA@Wikn8 zM3Y&@;|b2s;$-=ku+j$XIrg$IPwd``W+^&5J74e^eo>6CWqjFdSWC~GF6qZkYw@6x zkl1u#-p~?WbF55ZQ)NuE^PQ1`?GtT~v3q>grMs;|{WYz(8wRw^&tzuJv-0gfQmBSh z>dsL~-ohc%|5W6?TG3}lXo*`&>$IHS`uf(pmvokR%(AE7Yix3SD=F(`U?#(-Z|ySe zZ%dVwz?skX{kbu_Ry0Rdhd|TCrs$>!w+VdQY>oB2^_=Ee?Os_=<8;+TbuJD^vvja( zlduznzK?jCR%=cA6OzrK9p`#dGWV(7OLgR$2O8jlT$w>7uoKCqPzq5^bX5P7`|Rdg zytKh9rvl$gsHL}hVX{K?GZ`9eoEaVj8eHF;|Cb8BtP+1kNQtch zP!T9TJ&VehrR4iCTcSA4%PMd$-R{6J0%vJ!5Z1alg#puwsOo!*TeA&I3GHwsDs>u$ zTl)!Sf-0#e#dKeB2@IXWFO)hJF;fL8E(U8<9U%6dmDT*hR^5e=YN1i+$O~2+ptdud 
z$--{2L%tf)Yi7^9lvHw#P%B*?Vg>fMJ}_5r*+3hxRd*wrRh7yQ)`&niMUZTy7kX{$ zY}Gxc80HWoyWQ`HIhZw9Wxk{G{fH?FNlcK~6fG-qdN0eqKm!4nGCID5M93=e^Ye#Z zx8^X6;H)RTtM|sLWwgS+T{V8{1iqbK7fHow-|IeJU89K2!@F1~Xsi<+M3%9ImEYOC z5Xsd2#zt&aAiZUXyE1Mvq=WktHht#Ne)QXtP4Fj9i-fly)<>WvRtpfPNc83;2Oj(f z38%mMIV*o8+=|mLd4KsoQZTkCVTq)3}5)U zZ*U=5=vQ~nn_!m9H7u!(3%2qJmB#JL@iW5bJ6W2%*yFN&JWBc)H^ipGS@Nv)8fwma zXcviwRQBJ&hiit7O(dof+CP$n3?|TYPI~n2h%FBm{ zSPI{9ge~sJRyIW#KD4p+Ttc+;Qj%{+ufJl3IlraS!kjO)dT|VRzb`fiRJ1WJd%lZYXh+HOY2Bq#@kF?>v1i0#XHoMQ{27@IaTU>4BGCh&@iZb) z(JCt6W?-tdZR+q>v^1x6Y{Z3BS~m}&58avKleFytn`zl3n?A>fEDLZrh_qA>D_^!@ zge(j!AjN=!UvX2O4*FYWNUxeCaafH%Df-W#G2U|~kOC2GxL1n}`1J%-cFrTSYrr=W z#DL$u5jWRDx|_Yqh)xEOB>j!QVr4_RD|V?!YJ9bbs)|&DNg`M#f^lzZD%*_wfQZcb z9VycU{nf<#`7n_8oF&zC!3rC^ZV{ez`tIJdXnm#ozm!7rBT=!HLSeT6JR+j)Y#|H5 zg=U(nBi%b_wO-^%l%f=3R7!M$x4L1wifTNWK+KaQ8*l)KnfzpNijO|7W2W)8G*hqn z@aHGs(TQ2?F)YBCaR>!@C?nYn=^NkdqDG>2au=LnQ{018J~NZde&6EcxP*PYPW9n$#o@DhOJn*aXQ-FX=PYXK!;KiPsB0`>JcrZ(k{%P z607`*$}d??v%B_Ujf@&`guwmbb7BfmRB+b_6#YU$1F?iyhCt?8SZ>Dnmff@_uM$?i z)HaI35Kr|{)kyS(1=13Q8cZz_TFR21zoS0kQP!7CLZ6>8hTqQVVl(#ti>+rtMC^gnhdu}vhW(Dun45fvWg@r zI5*vs;uOs}kdv5cQ=(MET_Z#yO6V5iiJl{p21pL*J`~lbe0;iumDKGA4 z?9LvY3b7UTWqWjw9L~)jJ`Fs4vk>u2s+(=rpD%(m6|v}RQu&8wI0WXOJ1zRlR-^Cr zgWGvU2gZ^Hc94u}%5|~~miE4UG7|}oM5Blmqjk^kXlvdBbJe1%8+@(kHimCtSzrIj zu&!VI?_c6zk36torPX*51UpHdgvurPAU3z*MxT5FR>M=9wBEM9$m*f@OACz@cb7G`*U?x!6||$%PjO>ZU+h(m~5#Q;t3W-b_IR^fG4ObV|7CAm`Gh&WzNZMn5SR6d0MWSgDHOnugt}1 zMWzU4ScJBN9Cc(gEDw)Q;=OCL%FslvYxOj?vpQXgrveYb$g|+wJ4lOU?`CSOa>(qk${iS?@o~Xq5KMAmPx|;n1wlqZ1(oJI zg)g==0)6-TWv&`K0J{`?-#3cPE}qH<99)3p<{D`t4pZ32dI*CqSf_!_d4GZ(oDVN`~mYZ)+2*FAq`ST!3ja^LQP2qSYufd1o&H0i> zR+{fZHfZsfB4*#wh3tH2KSiu%lJ)UO&ov{iu8-&Dw(LkdwvY`$(*uQ=G9vyN;%~yy zcfBvyL1jb@F@dm{Xg`p6ay9vmUB?Oxpi;?fND&iR;;sHzLL%?y&g_ek=4S@zYMQ)rn5;^8^OfMf)L?a=Ec*@)9pwFA4P`AxtcC}}ID zM-l}Fkhe&a*g}%3Q?$I!%OM<1)WHb3ePIDIa(X^;2(~>R35HYvk!jvQruzhE2*mkN zOW-M#RLd(FtWL;s5Xq0VI+D9Mr6zbYtwT@E)sfY?AIllIu`Y{f>;+9EM;B}LEa-h- zbX!Sw9P9O6NN9U5KVwO&Iv^XObGK(E0);zGVw<3sO7ac57Dvimq5;Zau#VLU8Qv+r z+^eMw34&8FT?Fbl#dM7@o#RHxN4sH{n16+k*&Wn1p@anQVY$7le%UU_TLQuGVGAfa zs3I~)Vli4o!P)j{upQ&!_pMLt>%EeIK~t>Y0hG5EB33c@yiBr=BMbZ&eW}dy0HJi6 z;Hp81DI`ju2GT)ph(e2WiJ2aW#x{<$m2|SoN0%AF2ow`V2(gbNQD+G5G?A3bQ`V<~&+Q^kM6JQkXo(yb!3oWGIAaeNux>`VQ}F3i zn9Hr6SWcj0aWTt?gJ_(*q=MH+q0`Z5LQ$>K0O%O(FRsy?)=Isrifo)hN2nv-`9s4c z<*Y!^I9)8a-`o}u`GGC0v<45%LEx={RBLsx(qbmZ){$sl0&@gEgcU+H)D=_gsoqI= zJRid2YK!wC(T02-vXD!?tA;2Z&LEVKMeFdQ`f#+QBcvfelJ}|!@(X(SX{b68sxePY zgIPb6mfAkh*I$^ojVx?bfPHh_R}#-{fERg^+1)PdqEM-?hG~OzCbDw0)J&LVcpQP= zRSGrg%H8ys6)64H!5}QUh{X@;JqLKJsul@M8tt>8hb3B@GI zQ1wnKEhTkw@O~9S$9~wM|Btjr2$h#-jqAUItIXine(C6 z@14WKnZmt*kahR5bQvqZ;x;(sjY%&S5nAx0P?EPTC-L$Yk?D_!X~6xF%D;6Q z9`Cbxa)8KhjUdy1Q4L3bm_pBhDJY(G-4=|xZY!}D5lg1mhG1*)H`ga61mzhu$stV9 z{e~Gny=e0zLi)R7vJfrA#9EV(&+5puoq?F5?63(T0hU4D)dO4kh|KmR-X8b3Ip6&b z7&A@eL^+Xc#{LwEURBIR`si4%r$39xFaWsbxp&E~rv755Zx0e`Z3=#xxtaeKt&C*8 zi6c?IZktD}FVvF7XVUVzL#EQeN3aCPPT!Lm$KUq1Lp=9jMGWDRk3^s&!)k|?CoJ1d zE9&Ny&W|DDEO(7L-|Qi4xsP35H^+Pm&jkbWJSC}Q-i=uE_e9EA@&pb!lF0L72c<^< zUg+~w%f}Irgt{UNZggvzA}R1Ku*e&WFud$s5;5|=J*w~0OF}{99yI5t&quPiYbjHV zn);QldAuY}SZqCv1?!pf8@<&0iPIw&R1vb-T5q4C?#c!ae&(M|hQE(Osy;0EBf9E{ zDelwRds|_%6XdIrB%as3ZcatSPfH1nk}3pcjJvj;(kt()s#t3NYaQr8*vV@|vH(K| zl9kMvh2iLSeV$s<_k=`?O(xUxg{Pi7JtLL<4MFn}X4K}7b+wiE_@>v0dvEsd8xU*G zkyMgwG(QI}jiJg~SPg7R6oj~u)j>J76}cr**_K&tj4cu}{e8RseR|O6wV5%tmXUJp zxDb5xYJ3J9_+8Y(CJP%#5@n+Alpnw&<2>{Q6|(|8yo1Y<{mPmfLnzK>K3K#E^aYPl z`T&IcTjtZ+6nB=&nwj|xMxS8;_!i;n9Pee2s=cMA^$}V1c{ycKILZymvGb&h_HeC4 
zjTIm3{)eztiM%iJyZ3#L$og2nddP~)sEJAKTZ1#80Q7gz*2f`GHRSRnA~bkyZ;Jki zR3h4k$z*}^5vGch6O;S&=~ITIIL6Vu@(9-Gs3`0XRWxIVEJqHQLC^VD#Gh=a$c-td zx5vz1n%MfYabt6eHw8M%xGsBJ8uB@;?W4p&@+GeLxIH~Lg?ratP}tJ5pJT=@XGwjq z!qU%yav$M@Vne(yMK_BcU7}}A5x4eg@I3Q-Tk;|d??anais?sC;pg=iu2RrN`)nbV zUn|J`)u*NSbMWD(qSEvTZ^B=;x?&N-Q?XHA^tgL^ia*>39R|H`l7&k@hcAxFS!lSS z{QWdyA%q~hJ83G8Ca=cN@CG$T{1u6X#CaV#^4(cOVz~p{)S4PXiTvOEqIZ>~;CFNi zwnAJp&3NrxVtQ|N!ARgQ5Xz zC(kN5lUs?HJTy$A%HLcflvBNPBn5xRX~e4J@>7;t19FD=KpNsxo$>$MU$*YUki_^Y zWE)OzsWZR<38f+u{o()c7Y-AX4HQpJt4A#RaIRq)v0TF$EZt4pPrS}K^N}kdQz$RF z+ml78)~5a?c4@d_#f2%vUGAR&+IXT6_FL5QVP^}pii&k6Sy`xD^6 z6myA><=5$jXX z)a#urr_hc6AB&u2QJm*;Yq&}NjG;oxx9|#J_bWi#-&&J?qAD2R8{q#$pZ2cYD=U=0 z|EjcY6Fl74@&F(m`;P2UNRbw`&7{)8<^Lo3x<(*>ulD1gYsP>0ORqeU+qQZyx#9h< z27yO97oK#yC0vlsp~!DQeE0-BPMKx6Lb3DJaX~JbEnnTfM!h{uPdR-41OOk4U-)?e z@PChL&4Uh8Ds2}m?!Va1h&u9Ny2J;Af%3O-J}mO%Ot)rjldQ#Wj?QLsqE{k+1oFf8 zKN4o2M*J+hjP}mzu9RCO1!)6!va4k$Uh^x%F>Sy#JKi(XYy@`ow+jLv{Op5V@mGWX z88IQ8w|%^%5$bS*NxUHr?i{>1Yn{WaimSbY z%@IxkUW^BN`tDbJI;(wl@qaMrY;@i=Bz`~orw#MEYD&x;Z5mqnCk}W*M3Vx3xz!_j zVWYc=@$SanSL${ZJc;K}&52_}R&kA14DL5-H@Mv#hsY5for-dwKwntE?#6Cy1EE>lj6~VSghcJJ?^t^IbgKkV`K84ZwU|>K%eL6k8|Bu=} zFReXYZ6)Q7PF7K6k8b8m8CB8*g)-OUL%!WzCs!zq{8d+Bqp2BK<&SBcZh+`s(4V2u ze#e}`X&s5|ZD26^L;h+;({1AL2MAC8?KeUlOk;S)n+I7EjS+_+-wi(EfsTbpyTs2g z{SK5k)k8)a)i}MchHj4-)%Z5_vi(jzNbis7&MdUq(>}Llr1rL7`H2IfK){19Hb|FQ zQ3H8LurA;1Dpr0$x4lw@ZvL&qg}Jew24zgLR%C@|p{a+1Z}8A#p`W?2VP8?1Zc5^C z>M_I3nwn|CJHxy%qVw-9Q(UyJkhTqHf#N{HnC!;TjvOJCR0a-_Z+=iiDX8WlcGC z^!DhNVG!P zvIdUkTuxVXp~%O}xsqIQ;=pHe{onl0>mHKMfdIj)xH-Za5i<&o$mQO8pP~l(UFmee z{(6!N%wrJH{?lE1ec8X@H0h~jHVzNA9W`;v6bhCwXqpUK<}A-HN_L$y(~r6ezbhRm zcpS4=ehAa6c8eR^-q##0s!wlA!*?|Fy({e-YS~wk!7Jzb6vlD;LL@7AAtc}aE8L?+ z3wF`vpK9F=%dw&DT;5jS?RLH7tkIpdMF-ZJg`>DD^d0cC>Rui~%3W)EvO;lmW&0ai z8AVO*QN`~1W}NNDt<6J`b=__^%X$O25wA4mk)FP!SY_$>_*WU--N{EfIy!&a7vOA^6XV&Uhg(F z8ePKE-O}#(wW8cv?Cdj`FDywLn`w51c9>zN*uU??Z;f9GE(XXHzv;yaCsLEiUY Qz`vh1ZTpeEVefDM577K5bpQYW literal 0 HcmV?d00001 diff --git a/docs/website/static/img/utxo-hd/utxo-hd-sync-01-19-23.png b/docs/website/static/img/utxo-hd/utxo-hd-sync-01-19-23.png new file mode 100644 index 0000000000000000000000000000000000000000..a9d73984e07b19cde0b1d2782063d6168f521ef9 GIT binary patch literal 68662 zcmeFZc|4T+8$YZ%)hSCS6-qcQQdwIV5l#!82uTPtmL%0MW|%QCmP%-nRJN0~D9SRn zm@#9`PzYHEGh-M#V-ztlnCBjC$oKc(^T+S?Jg?V%I_FIH{kgCE+OPL@eLmRpMrVXK z?${_GARv79-_sWa1U8Tb1lGOya}Dszz-F&P0fAMA&!4-f%j5BYU!FaC#{XBTfWR$Y zsQ^!a%`W{%;Qf1n6W67kJyhg+WJ44ed^-z*z`NR z{IX0on@14<-X?Gb_|tE9@NT7Gi-rFW-o70UkRl*Z8&o$Uz$cL>P}jiQ8piWG|LN8( zp02>mEw`aauTT9CittcEpy1>r?b)rrc`pUnci7~QZE2P_@1JM9zl)2D z6A}_+WMmi&MoCGDt*xz&jt+aPY#H#)8{q$1x(f)1koo_tijWtP6cG4Z;OyxW7jMS0 z;;bL0>F4X#zKcxV6j@h4r_(si2tcJx!8DEB zJi@Hnlo6g>q+_O9u#Lm^fZ9YDV$)<~o|8B8$y4~(^mVkK8`XGy1R>Kn!qC>c#F}C` zJxkg9ZJj#W@IWxp*VnR_vO6v228xG&pWup*(i$7PO0j-SuO6ts(q8mLRR&{`KRjPF zW7!n%dR%*QQcy>hZ4OedVd-P{@cFR8c>cx}PmI1ojj}F6F^$<8`y#D+)~v)vmOTfH znVHMBkSFz2k4T|1uaAL->z)#(PSfU0CzHOm;GymZRWjZ6ck=o9{!Xv1Ij!Sz1W3M@ zJ(+gQtTk2{TKu_=L{=RuHjbf>Shg2W%+w_%MbsAL*GtkxIqDM@&7LTqFQh5CVR_ky ze34wSOohg6bF|Kk=Dl%xkE}i z#Db~@KcQq&O{qO{MjJu*mzVT}B%ihp4F16p8u-qQve>giB9aef^sGY9i4J9 z%J|p<_uP9S*l61Q)O3wFob7+?*As?osSH~*?B)B3cN}R_3S-!y<@oWL$hKOs1y(f{ z%A15unQ^w?DSC@62yajJb)@dhY%kL5w7qCKKjmE*=CoD028nvpo(KkM_f77P4Soqj2{Saog$2Rc&fFY|lyvdIFUUXb1`tZWv?Mb%q0&LQx zvE!|n)qh{AVi>gdv{4(yanElhST$$t%wFQH~uyGUe}_Ef1&@=gzr-#s0_Kw zrw+HK*F(2Pj~Z8#I`UGBr*Pbj6Y0H)Y5Ys0$x$Nw5;6j~+GN-yQOFo0ld6iquMN2F zPrIKEs~3(mtSz(|em70d;cmrds?_T%5&0MH2qf?XR@Z`m&2<;co0u|fLg^co&m9Q# 
zG+&^_%q?!{u#mn2k$h&dN4ymR4EC8Bh};K7AUzo3fTUiy52l*Ez8!MG<2SNQ*D zZ$3S#^GU|yn?NohUxJYl{3`;Nd!G3K;c+QZo}8FQPL%KaCcO*Hjun5e)(6MYO=;Dy z>$g(g`6cUQ>+VNXs%t7VY`ffGcDW(qyL?cxxL2tMmG8f?yJwG!w>=cPGOH!w%cQIe+A=2gK^>CBqy%~*bG+QRoA?iqnxTyGn_ij zo{+4sd9CZq(}Q?)5{@gqGp|SIa&_O`_4vmxaHS(>R82*qMC#SQbl@1GLt&g@5&JtrUd{+c@E_I0vi5ZTrk_P{xCORO4H zEH7d-q6H&`VMKj0Wh&W%6oeuP2D6wQD=Q2jUX*d1nWKkd8WE>#7O694q&Ut+!{$Ue zzDaAF9ov-=wlY}p6-k9)p)#ZvXTd0OONf~zqKd>N(`TYgbzG0WzMnW8MacYLoJ}WB zZ(NDxO0qcN|C@iu5y=qD=<_<B>WK!9RM#VN^4o@+>apXQc*Dq1W4ywMQ z5^07LG~^R_M!Z4A83*>e6)lgXiCnp ziJs3|C;CP{gFPp1;*QU($q!svPflUea>u8~21vC!p;4YwEEoKsaY4eLE4drC2)|&W z{9D87IDl}B)UmZmki``ctW={+gJ+}ZyaGqv&SKl+wx?HgR%~fmc1L0LNFm^TD@GB= zCTDEcT7aCG(V`aM8mRd|U&d=RML~H2(-XXNC3(`c^ug`8TO{8I0wvJeIC1hZDmOT79AaKc&sKy*l(ilYDon&myzn5iA}4?v1Q_PZOl<$6nj@RK9B+; zw8hzzS{i*J(odR%;vV-%^YtJYv>YYbV4)l=N!Rb;niU1^A*(dBqNDy_dDj2o_w-?P zPg0xmOs$y7DXi(Tgg6}pak69}?%$X{q8TWfvOE`eCD!E-dwNkq!jS!l<@xqx@`ys3 zrqN~}N*LhAf6oALxw9Uz)*e2^2mRt(JA6ROIlKJ|^=D7-lr{S9=NCD(xIR+PMcp|i zMAX!=E%?h_^y4)(BR#9Opo#6ligW9N8gl*thYQyP^~49c6C^2FcZQOs&Fls6zO={)nz zZ{NddD8q50O*`Y$cxgbsO4-ATaXqe_A~roIiE8)kSwo?81yfF?^mNDo|<@6U6iBjlh_#$b{NMv5lE3PuHt#Kkw0g)ME zlrxO#7%iyy*In~e+-%5eAb2j^euA2mw7)AfkOd z)M!2>&EbS`m}f>b$%s0p6Gc`%T&wI`LTzV(rdQHfj?X?EMBV%<9y58p7%_G#(^}lu zfHn}a=~+?cbMN?VB)_zY!?0paqd5(^wn)PGE)T`(pc*c{&H?{q)9;Bu9a07}*=mV$ z)p9hWpmGh_6F`tJ@f&DWMvQ@^Z`JGI`^glkNWY>7J7ff@=68R$ZYN^sJD4i<>CHzYkaD+ zG^Zx|>yoxs?Vv}`)Z4LFX?D?0sQ1Mp+p;B;SY%KQb!aR0{Z zPXYXJ;sbZK(f3sW(hK9=;`_;jQms%z8wO&q&bO_$|ts3 zHej}|T+C7<2*Wm2Fhguja7|wEaedkze;^^!oNT=gbu{7F^791!74rlLR(|Ydy0bp5 zD5YgOmu%)3R%3@H9d#LmCmZHdw;(E@@kcSrTW zd?{xPX;b;Xk}f4zwOJopoK>%5i#If#oX4Z(K+Vih6CMNqL(D)L*^wVv0^|iDwni>E zf=!I@*-p+251Y!B(TRL=!G@KxhyU9xkZmr@o_y0ZGySFMa7{-h`v9(w>p1Zxa9}{% zHP7~9t;3Gt;Q_}FE1C+-mS4#JOvRglRGj-6TpPv7j4`(0cRh&H7ylRkVK&U+bnki0 zv{&_9k7Imd4|e2i(#a{{aca z%B5{F84_dR8Hd)Vy(DPi5^NHJyNc_{eeIan;QvXOEr7|crA?-hTP9!D-A{Z!JU%kf z;t&G1xl%EjEd+$^t1Fs3UKznzjXsc@W5JA%GEG=Jia-yOn$0MfDtSNK$`!pgP--kn zyl1`5%`rqI*}=AQRM1;t***P+qU>1B*wbI@9h65VEvj7 zAUM1$j(BoJ1t$10(9!8Hp?N7FAh+2nPGrAQWYhz{1>{9s+)B9QL4SHnFh;mX- z8>V_Ct3cFrHlyWJbB6cZq$c`$KXvl-tWE7_7)*ylPpNtML^5maDy7t z#t$MqO?Ra!`1xfzYdXejJ=l+s{4RxGog*3N*%(u<)Bw<4lIyM%Y?{KWf-fCMCH$BL z2p)cZf#HYn;YwJH8oq9!vuwKBE?<#f+@#Vk=eQg{`9r*U?idGD zX#jrOhflxe)yQx1;XC)TBOU8^KOLyY9PG%X>bJMnxF!v*Vv?dmsURsaYeq*OA zjQWo!X-4z39F!0|kMX&>ivR+9*tK}4^0_GLBxfM+I^dU$H}_YhrOs*i1TC!t+Kvp) zbHe^@dlYMjJTqRxP$-I?8T-dq`>+`^I@g&pm3ZCG{z;0n^02nA-xRT^o5SitFXOCM zA^RLA_wpL|FAY*vXdev32qN$w3pRGn#;B;U+5~uh;Ik%`!D|<0^P7n|e*B(vSbt0B z)RekuCw+8)>&IKS8u$-RNP3jbP=Rle)y4mMr-3#&rNAQCyYmp&s))0tZm9-O{YP}S zFb-+!JJvfWGBCih-lXZi57u8jWVcTrOudJzxbyafF&&2`X>E<3WAgir?dxCWqUjdP ziI0Yr&VU44qy0nJc3x?Wb=gUp4?ZaSNYqQI zE`woMkCBFxF4ClZ>vzz7`>EFJHP16yqoYry#PVGL_q_zR2z>@6c<;Cq@_5Yyn2vo4 z!#68I)Tz)z<1YZY3p%b~TE=h60SH$Q(3XR}gj%fvg@$Y!NTSxzhVckzYi4>dL+>Q^ zNdL7U#|z(kd#(T!K{G^BqVOl2$v36xHeykDr70WcWfUHebmh+BtaHK(tu@h+5TLn6 zx(!QT&G8S}QIf=!bnwI2^XBIy^Gfo7Sl}L8e)z=qE-Q%NnY{q2oNQW^zFXuBM=r7^ zC85x`SW5%^M+F~r|Ei;~rE(Fq)yO1;+anG=&-1%rFQ8TeoI1)d9)k^*o#rRGj37tm zB6`7_)x0p6z=l!8XbPhPNhz7j{Z#e(IX@nK%{nKuh}^W&)E%^dhPi$m!MCHIvu+*0 zsAY(t*T<0D;P+RIjuXc|=x9SVdbmNeN9I3fnDLhz%4%Nh37p4?I%}0sbQeNHcG?V; z)nv?Th`pCYw(RN4YF>=3IWvhxO|I8RU=<<3b4NjIKv%|Wet&gxnx^+ERZF^I!RIW- zKxZVvKcVm+?2%<&zwHaMMCQdLX`RwBI)spxy~o1%6yZpeQ4k=><+nB?2iW%io;Ogx zM2{p)xME^mTi>;Kv>XUq?_WldLSVZp+p!y5LL1^|47Rq zKLBR_`>h9ro#aOh;kP^D%Ss0W?K0{=48wUY(pEAWv`qA5 zx3+PSZlOL<`z>dS6w*R#LqvmAIqoK`)zNO!`a^&-kOD1p26QmGFO0RFZqFhffzMIJ z6FY-fkVXiK@&$i@-Jvc+cbs&Y;GR8zs9c8kke;&d8@vjmsejfWCJVl@Myu<`0=elD 
zAR3lPy0MM+2gqjUL!6H=xangc-p^f)_*-HITQB*#c#?3a#~EosD}!Ixr?s(N>-3`d zwe^xT(8pnq7(p3EySW1Zy;Q&*X|Ekf91TebAJkux0df-hHaYy+tIxDYI=Jj=L=qkw zAN&0_DRe{6dGWs3Brf+$x@_BRsV+U85oaXuvFeL6L|w2U2Gf5xNfSP!D53e~lc|1e z%rb6B#&H$kHpslTKWSxYDYx6YN#gqvux0Vx`8b=iwz(GtD#LXdDcy>x6k$t`1Ez1d zOhzH)qnWo2tk6f&A8V4<($w>L0vSNl{kP?|vP*Xm!cq8gE5a_&vxoL6M5?^0u><^f z4x146fDXM#B`JCw03p{u-sSx7ZRp|2(~3?>27C}t zt^;e&0Yn-vBU07PiF4b)0Ilr&CuLlyS3z&NFA1L3M2vw9+_l(+0Y@W^JC%8q`ob9&n((%lW5{ZclpSK`~qn)I)`<*h^A6? zRKA%mDFfCaLX2~rkbZ>xTvx+&MkN4%vAj|+a&wxxoqL&5NgbS5xMh95dw$MPoQu{? zMrQCdQ-DfgxutGqN6)L>hR9eXNnW3`_FF}Z7CG}f=hhcMJE(uC>Fo@``K+R`Y6Zg# zoppDHzyJ;}QFi(+cJ&sxL)*2_D#W-ZSHXhY_ynT_BkRqZlBf&d$)bvh4MNj*5KNqV zUuY4F*216Du#>K9Oi%>!8T`V$!6F-?D}gTija*KW>uZp@#1pH85O^`H)8NJ=O@1I= zY;E*SpsPM3c5BaiAcHJQ*TUCRsUM0qq)5`cmvhQyH!VOTa^eBISL)RkT2)O-IN{N< zdz>hcg3YvZMu&(K)}_L3Ys5-x!75r6gLOemoq-mfO)8rHXyz{5_Drg4hW2dI)jh5A zp$_rHvWl%foqCMhf5M_3a!9321DkBntgo)GvMKb(&fuG#u0As z(xs0UG`00@`Ue807N5cjE(~M*k_<+gUL|?Hsfq5dc^fh>z%}y89jasZV`3X1+m^lf zEAfQ&y%$L&IkW%?IZf?CnL@EK)3#l;-oUZD31EW|#UP!~;ad z#g>r;C*}`ai^CxO&>|Z$;Ljz4{zE#AK|dWPURIE;+u^imH5JH+|9Ru%#LCs^4GM6> z2eL5s7HK_;(3e1pv8dGE^b+cKfA0DjPFik&LzD|o54+{^0YvK}&rB;Hw5lr!fLq88 zHH;>p1mEsZ`;<3WXKQGPL4wLJFw+-XG8^Rr?dmTudBPkt0;|Wp?TC+6t(U6t4l-~< z;@>U1xen#9m9Pu;VlRl;B*@Uqo|;#yl`<~H629o6#g~20h$ZNxr(|!XfmMdTHw$Lv14X5^wft}kV5FRSa zbf**917wWuNsseOmAaHV)O$l9+4C+>xOL6}f`jl44`3UYuOEebR;=arz5(*Jw+WYlB=Vl}#e_WjJ`Ifdn)^42N&&=s?i zXfH$SZj6&wfm!R&+Z7O^&M(w8C4iDV@&2>tf{Q#irqTW@D2@Szd1=@DZ_m7HpiunTzZ&rd7H~C>@e)X zs9Th+&Rp>wjXNRHwy;=P#7?2mZ|&Qnbgq1-%~(G$N*8K@O#qVJvpW>J8NtV4# zA+4V(lk!_Pd^4F+;$p$v+ja>Mmp3{#^_3{C{P{fs_w<_UtDk19}>zX-Fxwp zB;NXFwNh6nka#>HwMP?tu{AO7Ftf-Kh9Wf#?m3ZEWo!6y5zU*lg`S5t>Yqltvv})7 zza|NKY~n8q%r>#iom?KLjr4a656}_K!`qTSocp~ut1g4)7WGDh7i9?aa*^_!Gly6> zU=c#zT13=XC&>~UW|t(}?r*QYMHW|^S9Hzl_?Yf!1mVl#Vo@3?5vgr30#4HOYITW+ zdd=Lqf#l8a6PW_9FTd8WPwQ#CZjNM(f{hGy+I4_g-wXPjjl-QvLwzG}hY}NJi+v9^ zTK{Jgz~!P{W$UZFh9cs}05mmV)T$8X_O&Q`*BpG`uVex9!v0dupRiRPhK<|l&tAHg z(^^9Rf#5XHe+;|~-|+tProdCSSW1=P2B@^L4v*(6hH4K9_z#1l1pA2f5pHa$r*r|GNp|>s;j4b15tMw^!kC)^piEWQ_+7c=4jLM`)pqyX51DYaW6Pkr>z2v06N1Fd`x zr4GanZfuVO$Y@QRq%X38k4aK9)L)jy1mid}gU}|PF)~s7Z zh4|dKz9Xr;HuMf+UiDuKXGqai0ZK`u_&Dr51|I9HOyJ^^1Jpm1K`($(~UKci}Ygyp&@Dhw?&;q7O6_i zClw*3TLu7VO^T^sG=j(k{{>=_ohc1Gx*;cH!Q9;0cmHR~D zE7~{(_z3*tKqr3@G~53~$TE6H+lD01hc(gfP&K(?59f+*672z-D{-s#x0hcg5oqa@ zcxgp<;NIGXImql(v9Ct(96oA5(mH}r5>CharC23iH#xCSJ@_b^e@p=fAXI_}<9)k0Ysd$LSpey`y-)6>%+!e*kw8kj$o0Y#(9d3v! 
z+Hps2vOHCzl5JS#A6sV&(uogZ#`75=UzZHL2(H{LN`2z@<1llpV^&gW1iQV88@1~n-|db&%H|ezN6z07x%bPnr2^g8B@(V z=%O|wAkq0nhCSufe*CN(O*~Td6uN&}$g$y%s^d8Xt+*bo&kBx=zVrX{KPU38>Mf_5 z40N3BD((_#h}EVuDT0db_U=K3!!`m(tVc}*hHv$6HV>L1em=zaF075D6^}ejc6m3! zb`qKPWya~2RdycFbAEXZeSL@)(y6^i{Hn?g;Ye_w9yeFe9!l#^I%e#omk=J73E_o? z1>4UJ9rnp{&Ne1mvxY6p*dreuDxAhae&5e8J>R}bHP0=&<2-xBkwZPw zTNT&4mM!wuLCOZ_tT#@;UVq{8;%Go4^h4bFVuabYF`%N@t0W~qg=##r-q#dD^`ox$ z#jmwg4S22d5D=}2q92)s<4jcwxZ>-5wRwA5%#TbS)4A%L8^PGWF}UM8>K5I#)GU^P zsh_15DPiq|fQqLd?a{!xK2u~d8hoU%ieE#NKm-$+V4hJ9nmFq?iMHPyf0XGOB%s!%hcFagbCT&s@g)$J1|7>Sqr eKl%sU+S}C@Mk&P;4PU|kwrsYw$k@2=^#2F&Du=-U literal 0 HcmV?d00001 diff --git a/ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs b/ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs index b09448ade6..b72a969311 100644 --- a/ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs +++ b/ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs @@ -18,7 +18,7 @@ import Cardano.Tools.DBAnalyser.Types import Data.Foldable (asum) #endif import Options.Applicative -import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Block (SlotNo (..), WithOrigin (..)) import Ouroboros.Consensus.Byron.Node (PBftSignatureThreshold (..)) import Ouroboros.Consensus.Shelley.Node (Nonce (..)) @@ -44,6 +44,20 @@ parseDBAnalyserConfig = DBAnalyserConfig <*> parseValidationPolicy <*> parseAnalysis <*> parseLimit + <*> asum [ + flag' V1InMem $ mconcat [ + long "v1-in-mem" + , help "use v1 in-memory backing store" + ] + , flag' V1LMDB $ mconcat [ + long "lmdb" + , help "use v1 LMDB backing store" + ] + , flag' V2InMem $ mconcat [ + long "in-mem" + , help "use new in-memory backend" + ] + ] parseSelectDB :: Parser SelectDB parseSelectDB = diff --git a/ouroboros-consensus-cardano/app/db-synthesizer.hs b/ouroboros-consensus-cardano/app/db-synthesizer.hs index c74d7bd79c..43c6ccae59 100644 --- a/ouroboros-consensus-cardano/app/db-synthesizer.hs +++ b/ouroboros-consensus-cardano/app/db-synthesizer.hs @@ -35,6 +35,6 @@ main = withStdTerminalHandles $ do cryptoInit (paths, creds, forgeOpts) <- parseCommandLine let - genTxs _ _ _ = pure [] + genTxs _ _ _ _ = pure [] result <- initialize paths creds forgeOpts >>= either die (uncurry (synthesize genTxs)) putStrLn $ "--> done; result: " ++ show result diff --git a/ouroboros-consensus-cardano/app/snapshot-converter.hs b/ouroboros-consensus-cardano/app/snapshot-converter.hs new file mode 100644 index 0000000000..3a3c0da137 --- /dev/null +++ b/ouroboros-consensus-cardano/app/snapshot-converter.hs @@ -0,0 +1,259 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +module Main (main) where + +import Cardano.Crypto.Init (cryptoInit) +import Cardano.Tools.DBAnalyser.HasAnalysis (mkProtocolInfo) +import qualified Codec.CBOR.Read as CBOR +import qualified Codec.CBOR.Write as CBOR +import Codec.Serialise +import Control.Monad +import Control.Monad.IO.Class +import Control.Monad.Trans.Except +import qualified Data.ByteString.Lazy as BSL +import Data.Functor +import qualified Database.LMDB.Simple as LMDB +import qualified Database.LMDB.Simple.Cursor as LMDB.Cursor +import DBAnalyser.Parsers +import Main.Utf8 +import Options.Applicative +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Extended +import 
Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Node.ProtocolInfo +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB as Disk +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Bridge as LMDB.Bridge +import Ouroboros.Consensus.Util.CBOR +import Ouroboros.Consensus.Util.IOLike +import System.FilePath (isRelative) +import System.FS.API +import System.FS.API.Lazy +import System.FS.IO + +data Format + = Legacy + | Mem + | LMDB + deriving (Show, Read) + +data Config = Config + { from :: Format + -- ^ Which format the input snapshot is in + , inpath :: FsPath + -- ^ Path to the input snapshot + , to :: Format + -- ^ Which format the output snapshot must be in + , outpath :: FsPath + -- ^ Path to the output snapshot + } + +getCommandLineConfig :: IO (Config, BlockType) +getCommandLineConfig = + execParser $ + info + ((,) <$> parseConfig <*> blockTypeParser <**> helper) + (fullDesc <> progDesc "Utility for converting snapshots to and from UTxO-HD") + +parseConfig :: Parser Config +parseConfig = + Config + <$> argument + auto + ( mconcat + [ help "From format (Legacy, Mem or LMDB)" + , metavar "FORMAT-IN" + ] + ) + <*> argument + (eitherReader (\x -> if isRelative x then Right (mkFsPath [x]) else Left $ "Non-relative path in input path argument: " <> show x)) + ( mconcat + [ help "Input dir/file. Use relative paths like ./100007913" + , metavar "PATH-IN" + ] + ) + + <*> argument + auto + ( mconcat + [ help "To format (Legacy, Mem or LMDB)" + , metavar "FORMAT-OUT" + ] + ) + <*> argument + (eitherReader (\x -> if isRelative x then Right (mkFsPath [x]) else Left $ "Non-relative path in output path argument: " <> show x)) + ( mconcat + [ help "Output dir/file Use relative paths like ./100007913" + , metavar "PATH-OUT" + ] + ) + + +-- Helpers + +defaultLMDBLimits :: LMDB.Limits +defaultLMDBLimits = + LMDB.Limits + { LMDB.mapSize = 16 * 1024 * 1024 * 1024 + , LMDB.maxDatabases = 10 + , LMDB.maxReaders = 16 + } + +data Error + = SnapshotError ReadIncrementalErr + | TablesCantDeserializeError DeserialiseFailure + | TablesTrailingBytes + | SnapshotFormatMismatch Format String + deriving Exception + +instance Show Error where + show (SnapshotError err) = "Couldn't deserialize the snapshot. Are you running the same node version that created the snapshot? " <> show err + show (TablesCantDeserializeError err) = "Couldn't deserialize the tables: " <> show err + show TablesTrailingBytes = "Malformed tables, there are trailing bytes!" 
+  show (SnapshotFormatMismatch expected err) = "The input snapshot does not seem to correspond to the input format:\n\t" <> show expected <> "\n\tThe provided path " <> err
+
+-- | Check that the given path has the on-disk layout expected for the given
+-- snapshot format before attempting to read it.
+checkSnapshot :: Format -> FsPath -> SomeHasFS IO -> IO ()
+checkSnapshot m p (SomeHasFS fs) = case m of
+  Legacy ->
+    want (doesFileExist fs) p "is NOT a file"
+  Mem -> newFormatCheck "tvar"
+  LMDB -> newFormatCheck "data.mdb"
+ where
+  want :: (FsPath -> IO Bool) -> FsPath -> String -> IO ()
+  want fileType path err = do
+    exists <- fileType path
+    unless exists $ throwIO $ SnapshotFormatMismatch m err
+
+  isDir = (doesDirectoryExist, [], "is NOT a directory")
+  hasTablesDir = (doesDirectoryExist, ["tables"], "DOES NOT contain a \"tables\" directory")
+  hasState = (doesFileExist, ["state"], "DOES NOT contain a \"state\" file")
+  hasTables tb = (doesFileExist, ["tables", tb], "DOES NOT contain a \"tables/" <> tb <> "\" file")
+
+  newFormatCheck tb =
+    mapM_
+      (\(doCheck, extra, err) -> want (doCheck fs) (p </> mkFsPath extra) err)
+      [ isDir
+      , hasTablesDir
+      , hasState
+      , hasTables tb
+      ]
+
+-- | Read a snapshot in the given input format, producing an in-memory ledger
+-- state that carries its ledger tables as values.
+load ::
+  forall blk.
+  ( LedgerDbSerialiseConstraints blk
+  , CanStowLedgerTables (LedgerState blk)
+  , HasLedgerTables (LedgerState blk)
+  )
+  => Config
+  -> SomeHasFS IO
+  -> CodecConfig blk
+  -> IO (ExtLedgerState blk ValuesMK)
+load Config{from = Legacy, inpath} fs ccfg = do
+  checkSnapshot Legacy inpath fs
+  eSt <- fmap unstowLedgerTables
+    <$> runExceptT (readExtLedgerState fs (decodeDiskExtLedgerState ccfg) decode inpath)
+  case eSt of
+    Left err -> throwIO $ SnapshotError err
+    Right st -> pure st
+load Config{from = Mem, inpath} fs@(SomeHasFS hasFS) ccfg = do
+  checkSnapshot Mem inpath fs
+  eExtLedgerSt <- runExceptT $ readExtLedgerState fs (decodeDiskExtLedgerState ccfg) decode (inpath </> mkFsPath ["state"])
+  case eExtLedgerSt of
+    Left err -> throwIO $ SnapshotError err
+    Right extLedgerSt -> do
+      values <- withFile hasFS (inpath </> mkFsPath ["tables", "tvar"]) ReadMode $ \h -> do
+        bs <- hGetAll hasFS h
+        case CBOR.deserialiseFromBytes valuesMKDecoder bs of
+          Left err -> throwIO $ TablesCantDeserializeError err
+          Right (extra, x) ->
+            if BSL.null extra
+              then pure x
+              else throwIO TablesTrailingBytes
+      pure (extLedgerSt `withLedgerTables` values)
+load Config{from = LMDB, inpath} fs ccfg = do
+  checkSnapshot LMDB inpath fs
+  eExtLedgerSt <- runExceptT $ readExtLedgerState fs (decodeDiskExtLedgerState ccfg) decode (inpath </> mkFsPath ["state"])
+  case eExtLedgerSt of
+    Left err -> throwIO $ SnapshotError err
+    Right extLedgerSt -> do
+      values <- do
+        dbEnv <- LMDB.openEnvironment (fsToFilePath (MountPoint ".") (inpath </> mkFsPath ["tables"])) defaultLMDBLimits
+        Disk.LMDBMK _ dbBackingTables <- LMDB.readWriteTransaction dbEnv (Disk.getDb (K2 "utxo"))
+        catch (LMDB.readOnlyTransaction dbEnv $
+                 LMDB.Cursor.runCursorAsTransaction'
+                   LMDB.Cursor.cgetAll
+                   dbBackingTables
+                   (LMDB.Bridge.fromCodecMK $ getLedgerTables $ codecLedgerTables @(LedgerState blk))
+              )
+              (\(err :: DeserialiseFailure) -> throwIO $ TablesCantDeserializeError err)
+      pure (extLedgerSt `withLedgerTables` LedgerTables (ValuesMK values))
+
+-- | Write the given ledger state back out in the requested output format.
+store ::
+  ( LedgerDbSerialiseConstraints blk
+  , CanStowLedgerTables (LedgerState blk)
+  , HasLedgerTables (LedgerState blk)
+  , IsLedger (LedgerState blk)
+  )
+  => Config
+  -> SomeHasFS IO
+  -> CodecConfig blk
+  -> ExtLedgerState blk ValuesMK
+  -> IO ()
+store Config{to = Legacy, outpath} fs ccfg state =
+  writeExtLedgerState fs (encodeDiskExtLedgerState ccfg) outpath (stowLedgerTables state)
+store Config{to = Mem, outpath} fs@(SomeHasFS hasFS) ccfg state = do
+  -- write state
+  createDirectoryIfMissing hasFS True outpath
+  writeExtLedgerState fs (encodeDiskExtLedgerState ccfg) (outpath </> mkFsPath ["state"]) (forgetLedgerTables state)
+  -- write tables
+  createDirectoryIfMissing hasFS True $ outpath </> mkFsPath ["tables"]
+  withFile hasFS (outpath </> mkFsPath ["tables", "tvar"]) (WriteMode MustBeNew) $ \hf ->
+    void $
+      hPutAll hasFS hf $
+        CBOR.toLazyByteString $
+          valuesMKEncoder (projectLedgerTables state)
+store Config{to = LMDB, outpath} fs@(SomeHasFS hasFS) ccfg state = do
+  -- write state
+  createDirectoryIfMissing hasFS True outpath
+  writeExtLedgerState fs (encodeDiskExtLedgerState ccfg) (outpath </> mkFsPath ["state"]) (forgetLedgerTables state)
+  -- write tables
+  createDirectoryIfMissing hasFS True $ outpath </> mkFsPath ["tables"]
+  dbEnv <- LMDB.openEnvironment (fsToFilePath (MountPoint ".") $ outpath </> mkFsPath ["tables"]) defaultLMDBLimits
+  dbState <- LMDB.readWriteTransaction dbEnv $ LMDB.getDatabase (Just "_dbstate")
+  dbBackingTables <-
+    LMDB.readWriteTransaction dbEnv $
+      lttraverse Disk.getDb (ltpure $ K2 "utxo")
+  LMDB.readWriteTransaction dbEnv $
+    Disk.withDbStateRWMaybeNull dbState $ \case
+      Nothing ->
+        ltzipWith3A Disk.initLMDBTable dbBackingTables codecLedgerTables (projectLedgerTables state)
+          $> ((), Disk.DbState{Disk.dbsSeq = pointSlot $ getTip state})
+      Just _ -> liftIO $ throwIO $ Disk.LMDBErrInitialisingAlreadyHasState
+
+main :: IO ()
+main = withStdTerminalHandles $ do
+  cryptoInit
+  (conf, blocktype) <- getCommandLineConfig
+  case blocktype of
+    ByronBlock args -> run conf args
+    ShelleyBlock args -> run conf args
+    CardanoBlock args -> run conf args
+ where
+  run conf args = do
+    ccfg <- configCodec . pInfoConfig <$> mkProtocolInfo args
+    let fs = SomeHasFS $ ioHasFS $ MountPoint "."
+    putStrLn "Loading snapshot..."
+    state <- load conf fs ccfg
+    putStrLn "Loaded snapshot"
+    putStrLn "Writing snapshot..."
+ store conf fs ccfg state + putStrLn "Written snapshot" diff --git a/ouroboros-consensus-cardano/golden/byron/disk/LedgerTables b/ouroboros-consensus-cardano/golden/byron/disk/LedgerTables new file mode 100644 index 0000000000..874fe2c986 --- /dev/null +++ b/ouroboros-consensus-cardano/golden/byron/disk/LedgerTables @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion1/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file 
+ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Allegra_GetBigLedgerPeerSnapshot index f06522eab6..ba3a06b703 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Alonzo_GetBigLedgerPeerSnapshot index f06522eab6..ba3a06b703 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Mary_GetBigLedgerPeerSnapshot index f06522eab6..ba3a06b703 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Shelley_GetBigLedgerPeerSnapshot index f06522eab6..ba3a06b703 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion10/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion6 \ No newline at end of file diff --git 
a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Allegra_GetBigLedgerPeerSnapshot index b8610e9065..759e31cdfa 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Alonzo_GetBigLedgerPeerSnapshot index b8610e9065..759e31cdfa 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Mary_GetBigLedgerPeerSnapshot index b8610e9065..759e31cdfa 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Shelley_GetBigLedgerPeerSnapshot index b8610e9065..759e31cdfa 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion11/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion7 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Allegra_GetBigLedgerPeerSnapshot 
b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Allegra_GetBigLedgerPeerSnapshot index 8a62a09c26..1f4b0e87fb 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Alonzo_GetBigLedgerPeerSnapshot index 8a62a09c26..1f4b0e87fb 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Mary_GetBigLedgerPeerSnapshot index 8a62a09c26..1f4b0e87fb 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Shelley_GetBigLedgerPeerSnapshot index 8a62a09c26..1f4b0e87fb 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion12/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion8 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Allegra_GetBigLedgerPeerSnapshot index 44e742a34e..f9920f3487 100644 --- 
a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Alonzo_GetBigLedgerPeerSnapshot index 44e742a34e..f9920f3487 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Mary_GetBigLedgerPeerSnapshot index 44e742a34e..f9920f3487 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Shelley_GetBigLedgerPeerSnapshot index 44e742a34e..f9920f3487 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion13/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion9 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Allegra_GetBigLedgerPeerSnapshot index 8576dd1663..7bcb40a2c2 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Allegra_GetBigLedgerPeerSnapshot +++ 
b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Alonzo_GetBigLedgerPeerSnapshot index 8576dd1663..7bcb40a2c2 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Mary_GetBigLedgerPeerSnapshot index 8576dd1663..7bcb40a2c2 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Shelley_GetBigLedgerPeerSnapshot index 8576dd1663..7bcb40a2c2 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion14/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion10 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No 
newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot index 6a4849acd4..a679a948ec 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion7/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion4 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Allegra_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git 
a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Alonzo_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Mary_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Shelley_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion8/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Allegra_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Allegra_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Allegra_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Allegra_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Alonzo_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Alonzo_GetBigLedgerPeerSnapshot index 
297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Alonzo_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Alonzo_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Mary_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Mary_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Mary_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Mary_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Shelley_GetBigLedgerPeerSnapshot b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Shelley_GetBigLedgerPeerSnapshot index 297ee3e3f7..6427797f08 100644 --- a/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Shelley_GetBigLedgerPeerSnapshot +++ b/ouroboros-consensus-cardano/golden/cardano/QueryVersion2/CardanoNodeToClientVersion9/Query_Shelley_GetBigLedgerPeerSnapshot @@ -1 +1 @@ -ShelleyEncoderUnsupportedQuery (SomeSecond GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file +ShelleyEncoderUnsupportedQuery (SomeBlockQuery GetBigLedgerPeerSnapshot) ShelleyNodeToClientVersion5 \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Allegra b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Allegra new file mode 100644 index 0000000000000000000000000000000000000000..681ff87ab8ef42f48edf35c9c0233a735f2761f7 GIT binary patch literal 101 zcmV-r0Gj`Sp@LW-?iE_joIoicbjxc0jGWEVbI2A*{}D;=HoWR?yNW&~0D=O7SUCX8 zlUP!n^m3dsMxvlI%a|dnztL^vh=2uTbJ=J|zf)QJw$* literal 0 HcmV?d00001 diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Babbage b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Babbage new file mode 100644 index 0000000000000000000000000000000000000000..498bb0e491f7ab6afabda60d05f662f1ad52da96 GIT binary patch literal 174 zcmV;f08#&ep@LW-yD+3I9%AsX+FuGzidHXr6QWG&0Pca2dxp54fD;s|0fGgj09ZKy z%ad4Ao%C{?GDf1HGs~DEtH050y0fHE0p;#QIMn_AJR52zM2DZX;(+9^^nohRKYeL;hDN+idN@H(zbaZn=Z)t8B c1Ly*R0oWKq7{3G97)yczNJjwx03sp)5xkv6>;M1& literal 0 HcmV?d00001 diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Byron b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Byron new file mode 100644 index 0000000000..874fe2c986 --- /dev/null +++ b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Byron @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Conway 
b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Conway new file mode 100644 index 0000000000000000000000000000000000000000..8661c7842139caf81b63a09e08764c86dde6cabb GIT binary patch literal 174 zcmV;f08#&ep@LW-yD+3I9%AsX+FuGzidHXr6QWG&0Pca2dxp54fD;s|0fGjk09ZKy z%ad4Ao%C{?GDf1HGs~DEtH050y0fHE0p;#QIMn_AJR52zM2DZX;(+9^^nohRKYeL;hDN+idN@H(zbaZn=Z)t8B c1Ly*R0oWKq7{3G97)yczNJjwx03sp)5xpcu?EnA( literal 0 HcmV?d00001 diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Mary b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Mary new file mode 100644 index 0000000000000000000000000000000000000000..a254f4d7214982a96809dcf13d57101c096b53e7 GIT binary patch literal 149 zcmV;G0BZk%p@LW-?iE_joIoicbjxc0jGWEVbI2A*{}D;=HoWR?yNW&~0D=R8SUCX8 zlUP!n^m3dsMxvlI%a|dnztL^vh=2u|JX={MD-1PzHco1bYZ2L7_@xZ*_EZb3<=wZW#mU Dm#9PR literal 0 HcmV?d00001 diff --git a/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Shelley b/ouroboros-consensus-cardano/golden/cardano/disk/LedgerTables_Shelley new file mode 100644 index 0000000000000000000000000000000000000000..558288232b374c7cbc8ccd380fada269556b89ea GIT binary patch literal 105 zcmV-v0G9uOp@LW-?iE_joIoicbjxc0jGWEVbI2A*{}D;=HoWR?yNW&~0D=L6SUCX8 zlUP!n^m3dsMxvlI%a|dnztL^vh=2u=0.14, ouroboros-network-protocols, resource-registry, - serialise ^>=0.2, singletons, sop-core, sop-extras, @@ -658,6 +671,30 @@ executable immdb-server ouroboros-consensus-cardano:unstable-cardano-tools, with-utf8, +executable snapshot-converter + import: common-exe + hs-source-dirs: app + main-is: snapshot-converter.hs + build-depends: + base, + bytestring, + cardano-crypto-class, + cardano-crypto-wrapper, + cardano-lmdb-simple, + cborg, + filepath, + fs-api, + optparse-applicative, + ouroboros-consensus, + ouroboros-consensus-cardano, + ouroboros-consensus-cardano:unstable-cardano-tools, + serialise, + transformers, + with-utf8, + + other-modules: + DBAnalyser.Parsers + test-suite tools-test import: common-test type: exitcode-stdio-1.0 diff --git a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Forge.hs b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Forge.hs index a3f5519c52..67078f8975 100644 --- a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Forge.hs +++ b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Forge.hs @@ -47,7 +47,7 @@ forgeByronBlock :: => TopLevelConfig ByronBlock -> BlockNo -- ^ Current block number -> SlotNo -- ^ Current slot number - -> TickedLedgerState ByronBlock -- ^ Current ledger + -> TickedLedgerState ByronBlock mk -- ^ Current ledger -> [Validated (GenTx ByronBlock)] -- ^ Txs to include -> PBftIsLeader PBftByronCrypto -- ^ Leader proof ('IsLeader') -> ByronBlock @@ -122,7 +122,7 @@ forgeRegularBlock :: => BlockConfig ByronBlock -> BlockNo -- ^ Current block number -> SlotNo -- ^ Current slot number - -> TickedLedgerState ByronBlock -- ^ Current ledger + -> TickedLedgerState ByronBlock mk -- ^ Current ledger -> [Validated (GenTx ByronBlock)] -- ^ Txs to include -> PBftIsLeader PBftByronCrypto -- ^ Leader proof ('IsLeader') -> ByronBlock diff --git a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Inspect.hs b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Inspect.hs index 0380b7ba7e..6df86bab9c 100644 --- a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Inspect.hs +++ b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Inspect.hs 
@@ -102,7 +102,7 @@ data UpdateState = -- | All proposal updates, from new to old protocolUpdates :: LedgerConfig ByronBlock - -> LedgerState ByronBlock + -> LedgerState ByronBlock mk -> [ProtocolUpdate] protocolUpdates genesis st = concat [ map fromCandidate candidates diff --git a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Ledger.hs b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Ledger.hs index f455db7d44..575df9a369 100644 --- a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Ledger.hs +++ b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Ledger.hs @@ -1,6 +1,7 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} @@ -36,7 +37,8 @@ module Ouroboros.Consensus.Byron.Ledger.Ledger ( -- * Type family instances , BlockQuery (..) , LedgerState (..) - , Ticked (..) + , LedgerTables (..) + , Ticked1 (..) -- * Auxiliary , validationErrorImpossible ) where @@ -60,9 +62,9 @@ import Codec.Serialise (decode, encode) import Control.Monad (replicateM) import Control.Monad.Except (Except, runExcept, throwError) import Data.ByteString (ByteString) -import Data.Kind (Type) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map +import Data.Void (Void) import GHC.Generics (Generic) import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block @@ -82,14 +84,14 @@ import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsPeerSelection import Ouroboros.Consensus.Ledger.SupportsProtocol -import Ouroboros.Consensus.Protocol.PBFT -import Ouroboros.Consensus.Util (ShowProxy (..), (..:)) +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Util (ShowProxy (..)) {------------------------------------------------------------------------------- LedgerState -------------------------------------------------------------------------------} -data instance LedgerState ByronBlock = ByronLedgerState { +data instance LedgerState ByronBlock mk = ByronLedgerState { byronLedgerTipBlockNo :: !(WithOrigin BlockNo) , byronLedgerState :: !CC.ChainValidationState , byronLedgerTransition :: !ByronTransition @@ -119,7 +121,7 @@ type instance LedgerCfg (LedgerState ByronBlock) = Gen.Config initByronLedgerState :: Gen.Config -> Maybe CC.UTxO -- ^ Optionally override UTxO - -> LedgerState ByronBlock + -> LedgerState ByronBlock mk initByronLedgerState genesis mUtxo = ByronLedgerState { byronLedgerState = override mUtxo initState , byronLedgerTipBlockNo = Origin @@ -144,7 +146,7 @@ initByronLedgerState genesis mUtxo = ByronLedgerState { instance GetTip (LedgerState ByronBlock) where getTip = castPoint . getByronTip . byronLedgerState -instance GetTip (Ticked (LedgerState ByronBlock)) where +instance GetTip (Ticked1 (LedgerState ByronBlock)) where getTip = castPoint . getByronTip . 
tickedByronLedgerState getByronTip :: CC.ChainValidationState -> Point ByronBlock @@ -162,7 +164,7 @@ getByronTip state = -------------------------------------------------------------------------------} -- | The ticked Byron ledger state -data instance Ticked (LedgerState ByronBlock) = TickedByronLedgerState { +data instance Ticked1 (LedgerState ByronBlock) mk = TickedByronLedgerState { tickedByronLedgerState :: !CC.ChainValidationState , untickedByronLedgerTransition :: !ByronTransition } @@ -182,6 +184,18 @@ instance IsLedger (LedgerState ByronBlock) where byronLedgerTransition } +type instance Key (LedgerState ByronBlock) = Void +type instance Value (LedgerState ByronBlock) = Void + +instance HasLedgerTables (LedgerState ByronBlock) +instance HasLedgerTables (Ticked1 (LedgerState ByronBlock)) +instance CanSerializeLedgerTables (LedgerState ByronBlock) +instance CanStowLedgerTables (LedgerState ByronBlock) +instance LedgerTablesAreTrivial (LedgerState ByronBlock) where + convertMapKind (ByronLedgerState x y z) = ByronLedgerState x y z +instance LedgerTablesAreTrivial (Ticked1 (LedgerState ByronBlock)) where + convertMapKind (TickedByronLedgerState x y) = TickedByronLedgerState x y + {------------------------------------------------------------------------------- Supporting the various consensus interfaces -------------------------------------------------------------------------------} @@ -197,20 +211,26 @@ instance ApplyBlock (LedgerState ByronBlock) ByronBlock where where validationMode = CC.fromBlockValidationMode CC.NoBlockValidation -data instance BlockQuery ByronBlock :: Type -> Type where - GetUpdateInterfaceState :: BlockQuery ByronBlock UPI.State + getBlockKeySets _ = emptyLedgerTables + +data instance BlockQuery ByronBlock fp result where + GetUpdateInterfaceState :: BlockQuery ByronBlock QFNoTables UPI.State instance BlockSupportsLedgerQuery ByronBlock where - answerBlockQuery _cfg GetUpdateInterfaceState (ExtLedgerState ledgerState _) = - CC.cvsUpdateState (byronLedgerState ledgerState) + answerPureBlockQuery _cfg GetUpdateInterfaceState dlv = + CC.cvsUpdateState (byronLedgerState ledgerState) + where + ExtLedgerState { ledgerState } = dlv + answerBlockQueryLookup _cfg q _dlv = case q of {} + answerBlockQueryTraverse _cfg q _dlv = case q of {} -instance SameDepIndex (BlockQuery ByronBlock) where - sameDepIndex GetUpdateInterfaceState GetUpdateInterfaceState = Just Refl +instance SameDepIndex2 (BlockQuery ByronBlock) where + sameDepIndex2 GetUpdateInterfaceState GetUpdateInterfaceState = Just Refl -deriving instance Eq (BlockQuery ByronBlock result) -deriving instance Show (BlockQuery ByronBlock result) +deriving instance Eq (BlockQuery ByronBlock fp result) +deriving instance Show (BlockQuery ByronBlock fp result) -instance ShowQuery (BlockQuery ByronBlock) where +instance ShowQuery (BlockQuery ByronBlock fp) where showResult GetUpdateInterfaceState = show instance ShowProxy (BlockQuery ByronBlock) where @@ -223,7 +243,7 @@ instance CommonProtocolParams ByronBlock where maxTxSize = fromIntegral . Update.ppMaxTxSize . getProtocolParameters -- | Return the protocol parameters adopted by the given ledger. -getProtocolParameters :: LedgerState ByronBlock -> Update.ProtocolParameters +getProtocolParameters :: LedgerState ByronBlock mk -> Update.ProtocolParameters getProtocolParameters = CC.adoptedProtocolParameters . CC.cvsUpdateState @@ -326,8 +346,8 @@ validationErrorImpossible = cantBeError . 
runExcept applyByronBlock :: CC.ValidationMode -> LedgerConfig ByronBlock -> ByronBlock - -> TickedLedgerState ByronBlock - -> Except (LedgerError ByronBlock) (LedgerState ByronBlock) + -> TickedLedgerState ByronBlock mk1 + -> Except (LedgerError ByronBlock) (LedgerState ByronBlock mk2) applyByronBlock validationMode cfg blk@(ByronBlock raw _ (ByronHash blkHash)) @@ -344,8 +364,8 @@ applyABlock :: CC.ValidationMode -> CC.ABlock ByteString -> CC.HeaderHash -> BlockNo - -> Ticked (LedgerState (ByronBlock)) - -> Except (LedgerError ByronBlock) (LedgerState ByronBlock) + -> TickedLedgerState ByronBlock mk1 + -> Except (LedgerError ByronBlock) (LedgerState ByronBlock mk2) applyABlock validationMode cfg blk blkHash blkNo TickedByronLedgerState{..} = do st' <- CC.validateBlock cfg validationMode blk blkHash tickedByronLedgerState @@ -384,8 +404,8 @@ applyABlock validationMode cfg blk blkHash blkNo TickedByronLedgerState{..} = do applyABoundaryBlock :: Gen.Config -> CC.ABoundaryBlock ByteString -> BlockNo - -> Ticked (LedgerState ByronBlock) - -> Except (LedgerError ByronBlock) (LedgerState ByronBlock) + -> TickedLedgerState ByronBlock mk1 + -> Except (LedgerError ByronBlock) (LedgerState ByronBlock mk2) applyABoundaryBlock cfg blk blkNo TickedByronLedgerState{..} = do st' <- CC.validateBoundary cfg blk tickedByronLedgerState return ByronLedgerState { @@ -404,7 +424,7 @@ encodeByronAnnTip = encodeAnnTipIsEBB encodeByronHeaderHash decodeByronAnnTip :: Decoder s (AnnTip ByronBlock) decodeByronAnnTip = decodeAnnTipIsEBB decodeByronHeaderHash -encodeByronExtLedgerState :: ExtLedgerState ByronBlock -> Encoding +encodeByronExtLedgerState :: ExtLedgerState ByronBlock mk -> Encoding encodeByronExtLedgerState = encodeExtLedgerState encodeByronLedgerState encodeByronChainDepState @@ -468,7 +488,7 @@ decodeByronTransition = do bno <- decode return (Update.ProtocolVersion { pvMajor, pvMinor, pvAlt }, bno) -encodeByronLedgerState :: LedgerState ByronBlock -> Encoding +encodeByronLedgerState :: LedgerState ByronBlock mk -> Encoding encodeByronLedgerState ByronLedgerState{..} = mconcat [ encodeListLen 3 , encode byronLedgerTipBlockNo @@ -476,7 +496,7 @@ encodeByronLedgerState ByronLedgerState{..} = mconcat [ , encodeByronTransition byronLedgerTransition ] -decodeByronLedgerState :: Decoder s (LedgerState ByronBlock) +decodeByronLedgerState :: Decoder s (LedgerState ByronBlock mk) decodeByronLedgerState = do enforceSize "ByronLedgerState" 3 ByronLedgerState @@ -484,22 +504,22 @@ decodeByronLedgerState = do <*> decode <*> decodeByronTransition -encodeByronQuery :: BlockQuery ByronBlock result -> Encoding +encodeByronQuery :: BlockQuery ByronBlock fp result -> Encoding encodeByronQuery query = case query of GetUpdateInterfaceState -> CBOR.encodeWord8 0 -decodeByronQuery :: Decoder s (SomeSecond BlockQuery ByronBlock) +decodeByronQuery :: Decoder s (SomeBlockQuery (BlockQuery ByronBlock)) decodeByronQuery = do tag <- CBOR.decodeWord8 case tag of - 0 -> return $ SomeSecond GetUpdateInterfaceState + 0 -> return $ SomeBlockQuery GetUpdateInterfaceState _ -> fail $ "decodeByronQuery: invalid tag " <> show tag -encodeByronResult :: BlockQuery ByronBlock result -> result -> Encoding +encodeByronResult :: BlockQuery ByronBlock fp result -> result -> Encoding encodeByronResult query = case query of GetUpdateInterfaceState -> toByronCBOR -decodeByronResult :: BlockQuery ByronBlock result +decodeByronResult :: BlockQuery ByronBlock fp result -> forall s. 
Decoder s result decodeByronResult query = case query of GetUpdateInterfaceState -> fromByronCBOR diff --git a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Mempool.hs b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Mempool.hs index 8200ddf8c1..427d5fa38d 100644 --- a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Mempool.hs +++ b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Ledger/Mempool.hs @@ -71,6 +71,7 @@ import Ouroboros.Consensus.Byron.Ledger.Serialisation (byronBlockEncodingOverhead) import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Util (ShowProxy (..)) import Ouroboros.Consensus.Util.Condense @@ -124,6 +125,8 @@ instance LedgerSupportsMempool ByronBlock where txForgetValidated = forgetValidatedByronTx + getTransactionKeySets _ = emptyLedgerTables + instance TxLimits ByronBlock where type TxMeasure ByronBlock = IgnoringOverflow ByteSize32 @@ -263,8 +266,8 @@ applyByronGenTx :: CC.ValidationMode -> LedgerConfig ByronBlock -> SlotNo -> GenTx ByronBlock - -> TickedLedgerState ByronBlock - -> Except (ApplyTxErr ByronBlock) (TickedLedgerState ByronBlock) + -> TickedLedgerState ByronBlock mk1 + -> Except (ApplyTxErr ByronBlock) (TickedLedgerState ByronBlock mk2) applyByronGenTx validationMode cfg slot genTx st = (\state -> st {tickedByronLedgerState = state}) <$> CC.applyMempoolPayload diff --git a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Node/Serialisation.hs b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Node/Serialisation.hs index eecb969b32..0024fc55c1 100644 --- a/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Node/Serialisation.hs +++ b/ouroboros-consensus-cardano/src/byron/Ouroboros/Consensus/Byron/Node/Serialisation.hs @@ -25,6 +25,7 @@ import Ouroboros.Consensus.Byron.Ledger import Ouroboros.Consensus.Byron.Ledger.Conversions import Ouroboros.Consensus.Byron.Protocol import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (GenTxId) import Ouroboros.Consensus.Node.Run import Ouroboros.Consensus.Node.Serialisation @@ -48,9 +49,9 @@ instance EncodeDisk ByronBlock ByronBlock where instance DecodeDisk ByronBlock (Lazy.ByteString -> ByronBlock) where decodeDisk ccfg = decodeByronBlock (getByronEpochSlots ccfg) -instance EncodeDisk ByronBlock (LedgerState ByronBlock) where +instance EncodeDisk ByronBlock (LedgerState ByronBlock mk) where encodeDisk _ = encodeByronLedgerState -instance DecodeDisk ByronBlock (LedgerState ByronBlock) where +instance DecodeDisk ByronBlock (LedgerState ByronBlock mk) where decodeDisk _ = decodeByronLedgerState -- | @'ChainDepState' ('BlockProtocol' 'ByronBlock')@ @@ -177,13 +178,13 @@ instance SerialiseNodeToClient ByronBlock CC.ApplyMempoolPayloadErr where encodeNodeToClient _ _ = encodeByronApplyTxError decodeNodeToClient _ _ = decodeByronApplyTxError -instance SerialiseNodeToClient ByronBlock (SomeSecond BlockQuery ByronBlock) where - encodeNodeToClient _ _ (SomeSecond q) = encodeByronQuery q +instance SerialiseNodeToClient ByronBlock (SomeBlockQuery (BlockQuery ByronBlock)) where + encodeNodeToClient _ _ (SomeBlockQuery q) = encodeByronQuery q decodeNodeToClient _ _ = decodeByronQuery -instance SerialiseResult ByronBlock (BlockQuery ByronBlock) where - encodeResult _ _ = encodeByronResult - decodeResult _ _ = 
decodeByronResult +instance SerialiseResult' ByronBlock BlockQuery where + encodeResult' _ _ = encodeByronResult + decodeResult' _ _ = decodeByronResult {------------------------------------------------------------------------------- Nested contents diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Block.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Block.hs index ac41aafe2f..82772b5ad4 100644 --- a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Block.hs +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Block.hs @@ -2,7 +2,9 @@ {-# LANGUAGE DisambiguateRecordFields #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE ViewPatterns #-} @@ -71,6 +73,7 @@ module Ouroboros.Consensus.Cardano.Block ( import Data.Kind import Data.SOP.BasicFunctors +import Data.SOP.Functors import Data.SOP.Strict import Ouroboros.Consensus.Block (BlockProtocol) import Ouroboros.Consensus.Byron.Ledger.Block (ByronBlock) @@ -80,6 +83,7 @@ import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import Ouroboros.Consensus.HeaderValidation (OtherHeaderEnvelopeError, TipInfo) import Ouroboros.Consensus.Ledger.Abstract (LedgerError) +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (ApplyTxErr, GenTxId) import Ouroboros.Consensus.Protocol.Abstract (ChainDepState) @@ -679,56 +683,56 @@ type CardanoQuery c = BlockQuery (CardanoBlock c) pattern QueryIfCurrentByron :: () => CardanoQueryResult c result ~ a - => BlockQuery ByronBlock result - -> CardanoQuery c a + => BlockQuery ByronBlock fp result + -> CardanoQuery c fp a -- | Shelley-specific query that can only be answered when the ledger is in the -- Shelley era. pattern QueryIfCurrentShelley :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (TPraos c) (ShelleyEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (TPraos c) (ShelleyEra c)) fp result + -> CardanoQuery c fp a -- | Allegra-specific query that can only be answered when the ledger is in the -- Allegra era. pattern QueryIfCurrentAllegra :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (TPraos c) (AllegraEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (TPraos c) (AllegraEra c)) fp result + -> CardanoQuery c fp a -- | Mary-specific query that can only be answered when the ledger is in the -- Mary era. pattern QueryIfCurrentMary :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (TPraos c) (MaryEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (TPraos c) (MaryEra c)) fp result + -> CardanoQuery c fp a -- | Alonzo-specific query that can only be answered when the ledger is in the -- Alonzo era. pattern QueryIfCurrentAlonzo :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (TPraos c) (AlonzoEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (TPraos c) (AlonzoEra c)) fp result + -> CardanoQuery c fp a -- | Babbage-specific query that can only be answered when the ledger is in the -- Babbage era. 
pattern QueryIfCurrentBabbage :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (Praos c) (BabbageEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (Praos c) (BabbageEra c)) fp result + -> CardanoQuery c fp a -- | Conway-specific query that can only be answered when the ledger is in the -- Conway era. pattern QueryIfCurrentConway :: () => CardanoQueryResult c result ~ a - => BlockQuery (ShelleyBlock (Praos c) (ConwayEra c)) result - -> CardanoQuery c a + => BlockQuery (ShelleyBlock (Praos c) (ConwayEra c)) fp result + -> CardanoQuery c fp a -- Here we use layout and adjacency to make it obvious that we haven't -- miscounted. @@ -751,7 +755,7 @@ pattern QueryIfCurrentConway q = QueryIfCurrent (QS (QS (QS (QS (QS (QS (QZ q)) -- pattern QueryAnytimeByron :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeByron q = QueryAnytime q (EraIndex (TagByron (K ()))) -- | Query about the Shelley era that can be answered anytime, i.e., @@ -764,7 +768,7 @@ pattern QueryAnytimeByron q = QueryAnytime q (EraIndex (TagByron (K ()))) -- pattern QueryAnytimeShelley :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeShelley q = QueryAnytime q (EraIndex (TagShelley (K ()))) -- | Query about the Allegra era that can be answered anytime, i.e., @@ -777,7 +781,7 @@ pattern QueryAnytimeShelley q = QueryAnytime q (EraIndex (TagShelley (K ()))) -- pattern QueryAnytimeAllegra :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeAllegra q = QueryAnytime q (EraIndex (TagAllegra (K ()))) -- | Query about the Mary era that can be answered anytime, i.e., @@ -790,7 +794,7 @@ pattern QueryAnytimeAllegra q = QueryAnytime q (EraIndex (TagAllegra (K ()))) -- pattern QueryAnytimeMary :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeMary q = QueryAnytime q (EraIndex (TagMary (K ()))) -- | Query about the Alonzo era that can be answered anytime, i.e., independent @@ -803,7 +807,7 @@ pattern QueryAnytimeMary q = QueryAnytime q (EraIndex (TagMary (K ()))) -- pattern QueryAnytimeAlonzo :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeAlonzo q = QueryAnytime q (EraIndex (TagAlonzo (K ()))) -- | Query about the Babbage era that can be answered anytime, i.e., independent @@ -816,7 +820,7 @@ pattern QueryAnytimeAlonzo q = QueryAnytime q (EraIndex (TagAlonzo (K ()))) -- pattern QueryAnytimeBabbage :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeBabbage q = QueryAnytime q (EraIndex (TagBabbage (K ()))) -- | Query about the Conway era that can be answered anytime, i.e., independent @@ -829,7 +833,7 @@ pattern QueryAnytimeBabbage q = QueryAnytime q (EraIndex (TagBabbage (K ()))) -- pattern QueryAnytimeConway :: QueryAnytime result - -> CardanoQuery c result + -> CardanoQuery c QFNoTables result pattern QueryAnytimeConway q = QueryAnytime q (EraIndex (TagConway (K ()))) {-# COMPLETE QueryIfCurrentByron @@ -1053,63 +1057,63 @@ pattern CardanoLedgerConfig cfgByron cfgShelley cfgAllegra cfgMary cfgAlonzo cfg -- 'LedgerState'. We don't give access to those internal details through the -- pattern synonyms. This is also the reason the pattern synonyms are not -- bidirectional. 
-type CardanoLedgerState c = LedgerState (CardanoBlock c) +type CardanoLedgerState c mk = LedgerState (CardanoBlock c) mk pattern LedgerStateByron - :: LedgerState ByronBlock - -> CardanoLedgerState c + :: LedgerState ByronBlock mk + -> CardanoLedgerState c mk pattern LedgerStateByron st <- HardForkLedgerState (State.HardForkState - (TeleByron (State.Current { currentState = st }))) + (TeleByron (State.Current { currentState = Flip st }))) pattern LedgerStateShelley - :: LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateShelley st <- HardForkLedgerState (State.HardForkState - (TeleShelley _ (State.Current { currentState = st }))) + (TeleShelley _ (State.Current { currentState = Flip st }))) pattern LedgerStateAllegra - :: LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateAllegra st <- HardForkLedgerState (State.HardForkState - (TeleAllegra _ _ (State.Current { currentState = st }))) + (TeleAllegra _ _ (State.Current { currentState = Flip st }))) pattern LedgerStateMary - :: LedgerState (ShelleyBlock (TPraos c) (MaryEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (TPraos c) (MaryEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateMary st <- HardForkLedgerState (State.HardForkState - (TeleMary _ _ _ (State.Current { currentState = st }))) + (TeleMary _ _ _ (State.Current { currentState = Flip st }))) pattern LedgerStateAlonzo - :: LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateAlonzo st <- HardForkLedgerState (State.HardForkState - (TeleAlonzo _ _ _ _ (State.Current { currentState = st }))) + (TeleAlonzo _ _ _ _ (State.Current { currentState = Flip st }))) pattern LedgerStateBabbage - :: LedgerState (ShelleyBlock (Praos c) (BabbageEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (Praos c) (BabbageEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateBabbage st <- HardForkLedgerState (State.HardForkState - (TeleBabbage _ _ _ _ _ (State.Current { currentState = st }))) + (TeleBabbage _ _ _ _ _ (State.Current { currentState = Flip st }))) pattern LedgerStateConway - :: LedgerState (ShelleyBlock (Praos c) (ConwayEra c)) - -> CardanoLedgerState c + :: LedgerState (ShelleyBlock (Praos c) (ConwayEra c)) mk + -> CardanoLedgerState c mk pattern LedgerStateConway st <- HardForkLedgerState (State.HardForkState - (TeleConway _ _ _ _ _ _ (State.Current { currentState = st }))) + (TeleConway _ _ _ _ _ _ (State.Current { currentState = Flip st }))) {-# COMPLETE LedgerStateByron , LedgerStateShelley diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/ByronHFC.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/ByronHFC.hs index 140c633968..e93586acd6 100644 --- a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/ByronHFC.hs +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/ByronHFC.hs @@ -1,12 +1,21 @@ {-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE 
ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -Wno-orphans #-} module Ouroboros.Consensus.Cardano.ByronHFC (ByronBlockHFC) where +import Cardano.Binary (FromCBOR (..), ToCBOR (..)) import qualified Data.Map.Strict as Map +import Data.SOP.Index (Index (..)) +import Data.Void (Void, absurd) +import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Byron.Ledger import Ouroboros.Consensus.Byron.Node () @@ -16,6 +25,7 @@ import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Combinator import Ouroboros.Consensus.HardFork.Combinator.Degenerate import Ouroboros.Consensus.HardFork.Combinator.Serialisation.Common +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Storage.Serialisation @@ -75,3 +85,44 @@ instance SerialiseHFC '[ByronBlock] where reconstructNestedCtxt (Proxy @(Header ByronBlock)) prefix blockSize getHfcBinaryBlockInfo (DegenBlock b) = getBinaryBlockInfo b + +{------------------------------------------------------------------------------- + Canonical TxIn +-------------------------------------------------------------------------------} + +instance HasCanonicalTxIn '[ByronBlock] where + newtype instance CanonicalTxIn '[ByronBlock] = ByronHFCTxIn { + getByronHFCTxIn :: Void + } + deriving stock (Show, Eq, Ord) + deriving newtype (NoThunks, FromCBOR, ToCBOR) + + injectCanonicalTxIn IZ key = absurd key + injectCanonicalTxIn (IS idx') _ = case idx' of {} + + distribCanonicalTxIn _ key = absurd $ getByronHFCTxIn key + + encodeCanonicalTxIn = toCBOR + + decodeCanonicalTxIn = fromCBOR + +instance HasHardForkTxOut '[ByronBlock] where + type instance HardForkTxOut '[ByronBlock] = Void + injectHardForkTxOut IZ txout = absurd txout + injectHardForkTxOut (IS idx') _ = case idx' of {} + distribHardForkTxOut IZ txout = absurd txout + distribHardForkTxOut (IS idx') _ = case idx' of {} + +instance SerializeHardForkTxOut '[ByronBlock] where + encodeHardForkTxOut _ = toCBOR + decodeHardForkTxOut _ = fromCBOR + +instance BlockSupportsHFLedgerQuery '[ByronBlock] where + answerBlockQueryHFLookup IZ _cfg (q :: BlockQuery ByronBlock QFLookupTables result) _dlv = case q of {} + answerBlockQueryHFLookup (IS is) _cfg _q _dlv = case is of {} + + answerBlockQueryHFTraverse IZ _cfg (q :: BlockQuery ByronBlock QFTraverseTables result) _dlv = case q of {} + answerBlockQueryHFTraverse (IS is) _cfg _q _dlv = case is of {} + + queryLedgerGetTraversingFilter IZ (q :: BlockQuery ByronBlock QFTraverseTables result) = case q of {} + queryLedgerGetTraversingFilter (IS is) _q = case is of {} diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/CanHardFork.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/CanHardFork.hs index 8fb6bf08ec..8d8bc06dcd 100644 --- a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/CanHardFork.hs +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/CanHardFork.hs @@ -26,13 +26,20 @@ module Ouroboros.Consensus.Cardano.CanHardFork ( , ShelleyPartialLedgerConfig (..) 
, crossEraForecastAcrossShelley , translateChainDepStateAcrossShelley + -- * Exposed for testing + , getConwayTranslationContext ) where + import qualified Cardano.Chain.Common as CC import qualified Cardano.Chain.Genesis as CC.Genesis import qualified Cardano.Chain.Update as CC.Update import Cardano.Crypto.DSIGN (Ed25519DSIGN) import Cardano.Crypto.Hash.Blake2b (Blake2b_224, Blake2b_256) +import Cardano.Ledger.Allegra.Translation + (shelleyToAllegraAVVMsToDelete) +import qualified Cardano.Ledger.BaseTypes as SL +import qualified Cardano.Ledger.Core as Core import Cardano.Ledger.Crypto (ADDRHASH, Crypto, DSIGN, HASH) import qualified Cardano.Ledger.Era as SL import qualified Cardano.Ledger.Genesis as SL @@ -46,11 +53,13 @@ import qualified Cardano.Protocol.TPraos.Rules.Prtcl as SL import qualified Cardano.Protocol.TPraos.Rules.Tickn as SL import Control.Monad import Control.Monad.Except (runExcept, throwError) +import Data.Void import Data.Coerce (coerce) import qualified Data.Map.Strict as Map import Data.Maybe (listToMaybe, mapMaybe) import Data.Proxy import Data.SOP.BasicFunctors +import Data.SOP.Functors (Flip (..)) import Data.SOP.InPairs (RequiringBoth (..), ignoringBoth) import qualified Data.SOP.Strict as SOP import Data.SOP.Tails (Tails (..)) @@ -74,6 +83,8 @@ import Ouroboros.Consensus.Ledger.SupportsMempool (ByteSize32, IgnoringOverflow, TxMeasure) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Protocol.PBFT (PBft, PBftCrypto) import Ouroboros.Consensus.Protocol.PBFT.State (PBftState) @@ -82,6 +93,7 @@ import Ouroboros.Consensus.Protocol.Praos (Praos) import qualified Ouroboros.Consensus.Protocol.Praos as Praos import Ouroboros.Consensus.Protocol.TPraos import qualified Ouroboros.Consensus.Protocol.TPraos as TPraos +import Ouroboros.Consensus.Shelley.HFEras () import Ouroboros.Consensus.Shelley.Ledger import Ouroboros.Consensus.Shelley.Node () import Ouroboros.Consensus.Shelley.Protocol.Praos () @@ -138,7 +150,7 @@ import Ouroboros.Consensus.Util.RedundantConstraints byronTransition :: PartialLedgerConfig ByronBlock -> Word16 -- ^ Shelley major protocol version - -> LedgerState ByronBlock + -> LedgerState ByronBlock mk -> Maybe EpochNo byronTransition ByronPartialLedgerConfig{..} shelleyMajorVersion state = takeAny @@ -276,6 +288,16 @@ type CardanoHardForkConstraints c = , DSIGN c ~ Ed25519DSIGN ) +-- | When performing era translations, two eras have special behaviours on the +-- ledger tables: +-- +-- * Byron to Shelley: as Byron has no tables, the whole UTxO set is computed as +-- insertions; note that it uses 'calculateAdditions' +-- +-- * Shelley to Allegra: some special addresses (the so-called /AVVM/ +-- addresses) were deleted in this transition, which influenced things like +-- the calculation of later rewards. In this transition, we consume the +-- 'shelleyToAllegraAVVMsToDelete' as deletions in the ledger tables.
instance CardanoHardForkConstraints c => CanHardFork (CardanoEras c) where type HardForkTxMeasure (CardanoEras c) = ConwayMeasure @@ -288,6 +310,14 @@ instance CardanoHardForkConstraints c => CanHardFork (CardanoEras c) where $ PCons translateLedgerStateAlonzoToBabbageWrapper $ PCons translateLedgerStateBabbageToConwayWrapper $ PNil + , translateLedgerTables = + PCons translateLedgerTablesByronToShelleyWrapper + $ PCons translateLedgerTablesShelleyToAllegraWrapper + $ PCons translateLedgerTablesAllegraToMaryWrapper + $ PCons translateLedgerTablesMaryToAlonzoWrapper + $ PCons translateLedgerTablesAlonzoToBabbageWrapper + $ PCons translateLedgerTablesBabbageToConwayWrapper + $ PNil , translateChainDepState = PCons translateChainDepStateByronToShelleyWrapper $ PCons translateChainDepStateAcrossShelley @@ -420,25 +450,39 @@ translateLedgerStateByronToShelleyWrapper :: ) => RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState ByronBlock (ShelleyBlock (TPraos c) (ShelleyEra c)) translateLedgerStateByronToShelleyWrapper = - RequireBoth $ \_ (WrapLedgerConfig cfgShelley) -> - Translate $ \epochNo ledgerByron -> - ShelleyLedgerState { - shelleyLedgerTip = - translatePointByronToShelley - (ledgerTipPoint ledgerByron) - (byronLedgerTipBlockNo ledgerByron) - , shelleyLedgerState = - SL.translateToShelleyLedgerState - (toFromByronTranslationContext (shelleyLedgerGenesis cfgShelley)) - epochNo - (byronLedgerState ledgerByron) - , shelleyLedgerTransition = - ShelleyTransitionInfo{shelleyAfterVoting = 0} - } + RequireBoth + $ \_ (WrapLedgerConfig cfgShelley) -> + TranslateLedgerState { + translateLedgerStateWith = \epochNo ledgerByron -> + forgetTrackingValues + . calculateAdditions + . unstowLedgerTables + $ ShelleyLedgerState { + shelleyLedgerTip = + translatePointByronToShelley + (ledgerTipPoint ledgerByron) + (byronLedgerTipBlockNo ledgerByron) + , shelleyLedgerState = + SL.translateToShelleyLedgerState + (toFromByronTranslationContext (shelleyLedgerGenesis cfgShelley)) + epochNo + (byronLedgerState ledgerByron) + , shelleyLedgerTransition = + ShelleyTransitionInfo{shelleyAfterVoting = 0} + , shelleyLedgerTables = emptyLedgerTables + } + } + +translateLedgerTablesByronToShelleyWrapper :: + TranslateLedgerTables ByronBlock (ShelleyBlock (TPraos c) (ShelleyEra c)) +translateLedgerTablesByronToShelleyWrapper = TranslateLedgerTables { + translateTxInWith = absurd + , translateTxOutWith = absurd + } translateChainDepStateByronToShelleyWrapper :: RequiringBoth @@ -505,7 +549,7 @@ crossEraForecastByronToShelleyWrapper = ShelleyLedgerConfig (ShelleyEra c) -> Bound -> SlotNo - -> LedgerState ByronBlock + -> LedgerState ByronBlock mk -> Except OutsideForecastRange (WrapLedgerView (ShelleyBlock (TPraos c) (ShelleyEra c))) @@ -545,13 +589,65 @@ translateLedgerStateShelleyToAllegraWrapper :: (PraosCrypto c, DSignable c (Hash c EraIndependentTxBody)) => RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)) (ShelleyBlock (TPraos c) (AllegraEra c)) translateLedgerStateShelleyToAllegraWrapper = ignoringBoth $ - Translate $ \_epochNo -> - unComp . SL.translateEra' SL.NoGenesis . Comp + TranslateLedgerState { + translateLedgerStateWith = \_epochNo ls -> + -- In the Shelley to Allegra transition, the AVVM addresses have + -- to be deleted, and their balance has to be moved to the + -- reserves. 
For this matter, the Ledger keeps track of this + small set of entries since the Byron to Shelley transition and + provides them to us through 'shelleyToAllegraAVVMsToDelete'. + -- + -- In the long run, the ledger will already use ledger states + parametrized by the map kind and therefore will already provide + the differences in this translation. + let avvms = SL.unUTxO + $ shelleyToAllegraAVVMsToDelete + $ shelleyLedgerState ls + + -- While technically we can diff the LedgerTables, it becomes + complex doing so, as we cannot perform operations with + 'LedgerTables l1 mk' and 'LedgerTables l2 mk'. Because of + this, for now we choose to generate the differences out of + thin air and when the time comes in which ticking produces + differences, we will have to revisit this. + avvmsAsDeletions = LedgerTables + . DiffMK + . Diff.fromMapDeletes + . Map.map Core.upgradeTxOut + $ avvms + + -- This 'stowLedgerTables' + 'withLedgerTables' injects the + values provided by the Ledger so that the translation + operation finds those entries in the UTxO and destroys + them, modifying the reserves accordingly. + stowedState = stowLedgerTables + . withLedgerTables ls + . LedgerTables + . ValuesMK + $ avvms + + resultingState = unFlip . unComp + . SL.translateEra' SL.NoGenesis + . Comp . Flip + $ stowedState + + in resultingState `withLedgerTables` avvmsAsDeletions + } + +translateLedgerTablesShelleyToAllegraWrapper :: + PraosCrypto c + => TranslateLedgerTables + (ShelleyBlock (TPraos c) (ShelleyEra c)) + (ShelleyBlock (TPraos c) (AllegraEra c)) +translateLedgerTablesShelleyToAllegraWrapper = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } translateTxShelleyToAllegraWrapper :: (PraosCrypto c, DSignable c (Hash c EraIndependentTxBody)) @@ -577,13 +673,30 @@ translateLedgerStateAllegraToMaryWrapper :: (PraosCrypto c, DSignable c (Hash c EraIndependentTxBody)) => RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState (ShelleyBlock (TPraos c) (AllegraEra c)) (ShelleyBlock (TPraos c) (MaryEra c)) translateLedgerStateAllegraToMaryWrapper = ignoringBoth $ - Translate $ \_epochNo -> - unComp . SL.translateEra' SL.NoGenesis . Comp + TranslateLedgerState { + translateLedgerStateWith = \_epochNo -> + noNewTickingDiffs + . unFlip + . unComp + . SL.translateEra' SL.NoGenesis + . Comp + . Flip + } + +translateLedgerTablesAllegraToMaryWrapper :: + PraosCrypto c + => TranslateLedgerTables + (ShelleyBlock (TPraos c) (AllegraEra c)) + (ShelleyBlock (TPraos c) (MaryEra c)) +translateLedgerTablesAllegraToMaryWrapper = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } translateTxAllegraToMaryWrapper :: (PraosCrypto c, DSignable c (Hash c EraIndependentTxBody)) @@ -609,13 +722,30 @@ translateLedgerStateMaryToAlonzoWrapper :: (PraosCrypto c, DSignable c (Hash c EraIndependentTxBody)) => RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState (ShelleyBlock (TPraos c) (MaryEra c)) (ShelleyBlock (TPraos c) (AlonzoEra c)) translateLedgerStateMaryToAlonzoWrapper = RequireBoth $ \_cfgMary cfgAlonzo -> - Translate $ \_epochNo -> - unComp . SL.translateEra' (getAlonzoTranslationContext cfgAlonzo) . Comp + TranslateLedgerState { + translateLedgerStateWith = \_epochNo -> + noNewTickingDiffs + . unFlip + . unComp + . SL.translateEra' (getAlonzoTranslationContext cfgAlonzo) + . Comp + . 
Flip + } + +translateLedgerTablesMaryToAlonzoWrapper :: + PraosCrypto c + => TranslateLedgerTables + (ShelleyBlock (TPraos c) (MaryEra c)) + (ShelleyBlock (TPraos c) (AlonzoEra c)) +translateLedgerTablesMaryToAlonzoWrapper = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } getAlonzoTranslationContext :: WrapLedgerConfig (ShelleyBlock (TPraos c) (AlonzoEra c)) @@ -650,24 +780,43 @@ translateLedgerStateAlonzoToBabbageWrapper :: (Praos.PraosCrypto c, TPraos.PraosCrypto c) => RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)) (ShelleyBlock (Praos c) (BabbageEra c)) translateLedgerStateAlonzoToBabbageWrapper = - RequireBoth $ \_cfgAlonzo _cfgBabbage -> - Translate $ \_epochNo -> - unComp . SL.translateEra' SL.NoGenesis . Comp . transPraosLS + RequireBoth $ \_cfgAlonzo _cfgBabbage -> + TranslateLedgerState { + translateLedgerStateWith = \_epochNo -> + noNewTickingDiffs + . unFlip + . unComp + . SL.translateEra' SL.NoGenesis + . Comp + . Flip + . transPraosLS + } where transPraosLS :: - LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)) -> - LedgerState (ShelleyBlock (Praos c) (AlonzoEra c)) - transPraosLS (ShelleyLedgerState wo nes st) = + LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)) mk -> + LedgerState (ShelleyBlock (Praos c) (AlonzoEra c)) mk + transPraosLS (ShelleyLedgerState wo nes st tb) = ShelleyLedgerState { shelleyLedgerTip = fmap castShelleyTip wo , shelleyLedgerState = nes , shelleyLedgerTransition = st + , shelleyLedgerTables = coerce tb } +translateLedgerTablesAlonzoToBabbageWrapper :: + Praos.PraosCrypto c + => TranslateLedgerTables + (ShelleyBlock (TPraos c) (AlonzoEra c)) + (ShelleyBlock (Praos c) (BabbageEra c)) +translateLedgerTablesAlonzoToBabbageWrapper = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } + translateTxAlonzoToBabbageWrapper :: (Praos.PraosCrypto c) => SL.TranslationContext (BabbageEra c) @@ -709,16 +858,33 @@ translateValidatedTxAlonzoToBabbageWrapper ctxt = InjectValidatedTx $ -------------------------------------------------------------------------------} translateLedgerStateBabbageToConwayWrapper :: - (Praos.PraosCrypto c) + forall c. (Praos.PraosCrypto c) => RequiringBoth - WrapLedgerConfig - (Translate LedgerState) + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (Praos c) (BabbageEra c)) + (ShelleyBlock (Praos c) (ConwayEra c)) +translateLedgerStateBabbageToConwayWrapper = + RequireBoth $ \_cfgBabbage cfgConway -> + TranslateLedgerState { + translateLedgerStateWith = \_epochNo -> + noNewTickingDiffs + . unFlip + . unComp + . SL.translateEra' (getConwayTranslationContext cfgConway) + . Comp + . Flip + } + +translateLedgerTablesBabbageToConwayWrapper :: + Praos.PraosCrypto c + => TranslateLedgerTables (ShelleyBlock (Praos c) (BabbageEra c)) (ShelleyBlock (Praos c) (ConwayEra c)) -translateLedgerStateBabbageToConwayWrapper = - RequireBoth $ \_cfgBabbage cfgConway -> - Translate $ \_epochNo -> - unComp . SL.translateEra' (getConwayTranslationContext cfgConway) . 
Comp +translateLedgerTablesBabbageToConwayWrapper = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } getConwayTranslationContext :: WrapLedgerConfig (ShelleyBlock (Praos c) (ConwayEra c)) diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Ledger.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Ledger.hs new file mode 100644 index 0000000000..097d59279a --- /dev/null +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Ledger.hs @@ -0,0 +1,209 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Ouroboros.Consensus.Cardano.Ledger (CardanoTxOut (..)) where + +import qualified Cardano.Ledger.Core as Core +import qualified Cardano.Ledger.Shelley.API as SL +import qualified Codec.CBOR.Decoding as CBOR +import qualified Codec.CBOR.Encoding as CBOR +import Data.SOP.Index +import qualified Data.SOP.InPairs as InPairs +import Data.Void +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Cardano.Block +import Ouroboros.Consensus.Cardano.CanHardFork +import Ouroboros.Consensus.HardFork.Combinator +import Ouroboros.Consensus.HardFork.Combinator.State.Types +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Protocol.Praos (Praos) +import Ouroboros.Consensus.Protocol.TPraos (TPraos) +import Ouroboros.Consensus.Shelley.Ledger (ShelleyBlock) + +instance CardanoHardForkConstraints c + => HasCanonicalTxIn (CardanoEras c) where + newtype instance CanonicalTxIn (CardanoEras c) = CardanoTxIn { + getCardanoTxIn :: SL.TxIn c + } + deriving stock (Show, Eq, Ord) + deriving newtype NoThunks + + injectCanonicalTxIn IZ byronTxIn = absurd byronTxIn + injectCanonicalTxIn (IS idx) shelleyTxIn = case idx of + IZ -> CardanoTxIn shelleyTxIn + IS IZ -> CardanoTxIn shelleyTxIn + IS (IS IZ) -> CardanoTxIn shelleyTxIn + IS (IS (IS IZ)) -> CardanoTxIn shelleyTxIn + IS (IS (IS (IS IZ))) -> CardanoTxIn shelleyTxIn + IS (IS (IS (IS (IS IZ)))) -> CardanoTxIn shelleyTxIn + IS (IS (IS (IS (IS (IS idx'))))) -> case idx' of {} + + distribCanonicalTxIn IZ _ = + error "distribCanonicalTxIn: Byron has no TxIns" + distribCanonicalTxIn (IS idx) cardanoTxIn = case idx of + IZ -> getCardanoTxIn cardanoTxIn + IS IZ -> getCardanoTxIn cardanoTxIn + IS (IS IZ) -> getCardanoTxIn cardanoTxIn + IS (IS (IS IZ)) -> getCardanoTxIn cardanoTxIn + IS (IS (IS (IS IZ))) -> getCardanoTxIn cardanoTxIn + IS (IS (IS (IS (IS IZ)))) -> getCardanoTxIn cardanoTxIn + IS (IS (IS (IS (IS (IS idx'))))) -> case idx' of {} + + encodeCanonicalTxIn = Core.toEraCBOR @(ShelleyEra c) . getCardanoTxIn + + decodeCanonicalTxIn = CardanoTxIn <$> Core.fromEraCBOR @(ShelleyEra c) + +-- Unpacking the fields of the era-specific TxOuts could save a chunk of memory. +-- However, unpacking of sum types is only possible on @ghc-9.6.1@ and later, so +-- before @ghc-9.6.1@ we only unpack the TxOuts for eras before Alonzo. 
+-- +-- For more information on the @UNPACK@ pragma, see +-- https://downloads.haskell.org/ghc/latest/docs/users_guide/exts/pragmas.html#unpack-pragma +data CardanoTxOut c = +#if MIN_VERSION_GLASGOW_HASKELL(9,6,1,0) + ShelleyTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)))) + | AllegraTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)))) + | MaryTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (MaryEra c)))) + | AlonzoTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)))) + | BabbageTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (Praos c) (BabbageEra c)))) + | ConwayTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (Praos c) (ConwayEra c)))) +#else + ShelleyTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)))) + | AllegraTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)))) + | MaryTxOut {-# UNPACK #-} !(Value (LedgerState (ShelleyBlock (TPraos c) (MaryEra c)))) + | AlonzoTxOut !(Value (LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)))) + | BabbageTxOut !(Value (LedgerState (ShelleyBlock (Praos c) (BabbageEra c)))) + | ConwayTxOut !(Value (LedgerState (ShelleyBlock (Praos c) (ConwayEra c)))) +#endif + deriving stock (Show, Eq, Generic) + deriving anyclass NoThunks + +instance CanHardFork (CardanoEras c) => HasHardForkTxOut (CardanoEras c) where + type instance HardForkTxOut (CardanoEras c) = CardanoTxOut c + injectHardForkTxOut IZ _txOut = error "Impossible: injecting TxOut from Byron" + injectHardForkTxOut (IS IZ) txOut = ShelleyTxOut txOut + injectHardForkTxOut (IS (IS IZ)) txOut = AllegraTxOut txOut + injectHardForkTxOut (IS (IS (IS IZ))) txOut = MaryTxOut txOut + injectHardForkTxOut (IS (IS (IS (IS IZ)))) txOut = AlonzoTxOut txOut + injectHardForkTxOut (IS (IS (IS (IS (IS IZ))))) txOut = BabbageTxOut txOut + injectHardForkTxOut (IS (IS (IS (IS (IS (IS IZ)))))) txOut = ConwayTxOut txOut + injectHardForkTxOut (IS (IS (IS (IS (IS (IS (IS idx))))))) _txOut = case idx of {} + + distribHardForkTxOut IZ = error "Impossible: distributing TxOut to Byron" + distribHardForkTxOut (IS IZ) = \case + ShelleyTxOut txout -> txout + _ -> error "Anachrony" + distribHardForkTxOut (IS (IS IZ)) = \case + ShelleyTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons p _) -> translateTxOutWith p txout + AllegraTxOut txout -> txout + _ -> error "Anachrony" + distribHardForkTxOut (IS (IS (IS IZ))) = \case + ShelleyTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons p1 (InPairs.PCons p2 _)) -> translateTxOutWith p2 $ translateTxOutWith p1 txout + AllegraTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p2 _)) -> translateTxOutWith p2 txout + MaryTxOut txout -> txout + _ -> error "Anachrony" + distribHardForkTxOut (IS (IS (IS (IS IZ)))) = \case + ShelleyTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons p1 (InPairs.PCons p2 (InPairs.PCons p3 _))) -> translateTxOutWith p3 $ translateTxOutWith p2 $ translateTxOutWith p1 txout + AllegraTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p2 (InPairs.PCons p3 _))) -> translateTxOutWith p3 $ 
translateTxOutWith p2 txout + MaryTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p3 _))) -> translateTxOutWith p3 txout + AlonzoTxOut txout -> txout + _ -> error "Anachrony" + distribHardForkTxOut (IS (IS (IS (IS (IS IZ))))) = \case + ShelleyTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons p1 (InPairs.PCons p2 (InPairs.PCons p3 (InPairs.PCons p4 _)))) -> translateTxOutWith p4 $ translateTxOutWith p3 $ translateTxOutWith p2 $ translateTxOutWith p1 txout + AllegraTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p2 (InPairs.PCons p3 (InPairs.PCons p4 _)))) -> translateTxOutWith p4 $ translateTxOutWith p3 $ translateTxOutWith p2 txout + MaryTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p3 (InPairs.PCons p4 _)))) -> translateTxOutWith p4 $ translateTxOutWith p3 txout + AlonzoTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p4 _)))) -> translateTxOutWith p4 txout + BabbageTxOut txout -> txout + _ -> error "Anachrony" + distribHardForkTxOut (IS (IS (IS (IS (IS (IS IZ)))))) = \case + ShelleyTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons p1 (InPairs.PCons p2 (InPairs.PCons p3 (InPairs.PCons p4 (InPairs.PCons p5 _))))) -> translateTxOutWith p5 $ translateTxOutWith p4 $ translateTxOutWith p3 $ translateTxOutWith p2 $ translateTxOutWith p1 txout + AllegraTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p2 (InPairs.PCons p3 (InPairs.PCons p4 (InPairs.PCons p5 _))))) -> translateTxOutWith p5 $ translateTxOutWith p4 $ translateTxOutWith p3 $ translateTxOutWith p2 txout + MaryTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p3 (InPairs.PCons p4 (InPairs.PCons p5 _))))) -> translateTxOutWith p5 $ translateTxOutWith p4 $ translateTxOutWith p3 txout + AlonzoTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p4 (InPairs.PCons p5 _))))) -> translateTxOutWith p5 $ translateTxOutWith p4 txout + BabbageTxOut txout -> + case translateLedgerTables (hardForkEraTranslation @(CardanoEras c)) of + InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons _ (InPairs.PCons p5 _))))) -> translateTxOutWith p5 txout + ConwayTxOut txout -> txout + distribHardForkTxOut (IS (IS (IS (IS (IS (IS (IS idx))))))) = case idx of {} + +instance CardanoHardForkConstraints c => SerializeHardForkTxOut (CardanoEras c) where + encodeHardForkTxOut _ (ShelleyTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 1 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)))) txout + encodeHardForkTxOut _ (AllegraTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 2 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)))) 
txout + encodeHardForkTxOut _ (MaryTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 3 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (MaryEra c)))) txout + encodeHardForkTxOut _ (AlonzoTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 4 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)))) txout + encodeHardForkTxOut _ (BabbageTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 5 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (Praos c) (BabbageEra c)))) txout + encodeHardForkTxOut _ (ConwayTxOut txout) = + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 6 + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (Praos c) (ConwayEra c)))) txout + + decodeHardForkTxOut _ = do + CBOR.decodeListLenOf 2 + tag <- CBOR.decodeWord8 + case tag of + 1 -> ShelleyTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)))) + 2 -> AllegraTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (AllegraEra c)))) + 3 -> MaryTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (MaryEra c)))) + 4 -> AlonzoTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (TPraos c) (AlonzoEra c)))) + 5 -> BabbageTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (Praos c) (BabbageEra c)))) + 6 -> ConwayTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState (ShelleyBlock (Praos c) (ConwayEra c)))) + _ -> fail $ "Unknown TxOut tag: " <> show tag diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Node.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Node.hs index f54245242f..129407883d 100644 --- a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Node.hs +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/Node.hs @@ -79,6 +79,7 @@ import Data.Functor.These (These1 (..)) import qualified Data.Map.Strict as Map import Data.SOP.BasicFunctors import Data.SOP.Counting +import Data.SOP.Functors (Flip (..)) import Data.SOP.Index (Index (..)) import Data.SOP.OptNP (NonEmptyOptNP, OptNP (OptSkip)) import qualified Data.SOP.OptNP as OptNP @@ -93,6 +94,8 @@ import Ouroboros.Consensus.Byron.Ledger.NetworkProtocolVersion import Ouroboros.Consensus.Byron.Node import Ouroboros.Consensus.Cardano.Block import Ouroboros.Consensus.Cardano.CanHardFork +import Ouroboros.Consensus.Cardano.Ledger () +import Ouroboros.Consensus.Cardano.QueryHF () import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Combinator import Ouroboros.Consensus.HardFork.Combinator.Embed.Nary @@ -100,6 +103,8 @@ import Ouroboros.Consensus.HardFork.Combinator.Serialisation import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Ledger.Tables.Utils (forgetLedgerTables) import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.Node.Run @@ -967,21 +972,22 @@ protocolInfoCardano paramsCardano -- data from the genesis config (if provided) in the ledger 
state. For -- example, this includes initial staking and initial funds (useful for -- testing/benchmarking). - initExtLedgerStateCardano :: ExtLedgerState (CardanoBlock c) + initExtLedgerStateCardano :: ExtLedgerState (CardanoBlock c) ValuesMK initExtLedgerStateCardano = ExtLedgerState { headerState = initHeaderState - , ledgerState = - HardForkLedgerState - . hap (fn id :* registerAny) - $ hardForkLedgerStatePerEra initLedgerState + , ledgerState = overShelleyBasedLedgerState initLedgerState } where + overShelleyBasedLedgerState (HardForkLedgerState st) = + HardForkLedgerState $ hap (fn id :* registerAny) st + initHeaderState :: HeaderState (CardanoBlock c) - initLedgerState :: LedgerState (CardanoBlock c) + initLedgerState :: LedgerState (CardanoBlock c) ValuesMK ExtLedgerState initLedgerState initHeaderState = - injectInitialExtLedgerState cfg initExtLedgerStateByron + injectInitialExtLedgerState cfg + $ initExtLedgerStateByron - registerAny :: NP (LedgerState -.-> LedgerState) (CardanoShelleyEras c) + registerAny :: NP (Flip LedgerState ValuesMK -.-> Flip LedgerState ValuesMK) (CardanoShelleyEras c) registerAny = hcmap (Proxy @IsShelleyBlock) injectIntoTestState $ WrapTransitionConfig transitionConfigShelley @@ -993,11 +999,13 @@ protocolInfoCardano paramsCardano :* Nil injectIntoTestState :: - L.EraTransition era + ShelleyBasedEra era => WrapTransitionConfig (ShelleyBlock proto era) - -> (LedgerState -.-> LedgerState) (ShelleyBlock proto era) - injectIntoTestState (WrapTransitionConfig cfg) = fn $ \st -> st { - Shelley.shelleyLedgerState = L.injectIntoTestState cfg (Shelley.shelleyLedgerState st) + -> (Flip LedgerState ValuesMK -.-> Flip LedgerState ValuesMK) (ShelleyBlock proto era) + injectIntoTestState (WrapTransitionConfig cfg) = fn $ \(Flip st) -> + Flip $ unstowLedgerTables $ forgetLedgerTables $ st { + Shelley.shelleyLedgerState = L.injectIntoTestState cfg + (Shelley.shelleyLedgerState $ stowLedgerTables st) } -- | For each element in the list, a block forging thread will be started. 
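The 'injectIntoTestState' change above follows a pattern that recurs throughout this patch (see also 'applyShelleyTx' and the 'CanStowLedgerTables' instances in the Shelley Ledger.hs diff below): with UTXO-HD the UTxO set is kept in the in-memory ledger tables rather than inside the 'NewEpochState', so before calling a ledger-layer function that expects the complete state the tables are stowed back into it, and afterwards the (possibly updated) UTxO is projected out into the tables again. Below is a minimal sketch of that round trip, assuming the 'stowLedgerTables' and 'unstowLedgerTables' shapes implied by their uses elsewhere in this patch; the helper name 'applyOnStowedState' and the exact import location are illustrative and not part of the patch.

-- Illustration only; not part of this patch. The import is an assumption:
-- the class and mk types are brought into scope via the ledger-tables
-- modules used in the hunks above, but the precise re-exports may differ.
import Ouroboros.Consensus.Ledger.Tables (CanStowLedgerTables (..), EmptyMK, ValuesMK)

-- | Run a ledger-level update that expects the UTxO to live inside the
-- state ("stowed"), starting from and returning a state whose UTxO is held
-- in its in-memory ledger tables.
applyOnStowedState ::
     CanStowLedgerTables l
  => (l EmptyMK -> l EmptyMK)  -- ^ update on the stowed, table-free state
  -> l ValuesMK                -- ^ state carrying the UTxO in its ledger tables
  -> l ValuesMK
applyOnStowedState f =
      unstowLedgerTables  -- project the UTxO back out into the ledger tables
    . f                   -- apply the update against the complete NewEpochState
    . stowLedgerTables    -- push the in-memory UTxO into the NewEpochState

'injectIntoTestState' above additionally applies 'forgetLedgerTables' before re-projecting, dropping the now-stale in-memory tables once the updated 'NewEpochState' is the source of truth.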
diff --git a/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/QueryHF.hs b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/QueryHF.hs new file mode 100644 index 0000000000..ea5290eaec --- /dev/null +++ b/ouroboros-consensus-cardano/src/ouroboros-consensus-cardano/Ouroboros/Consensus/Cardano/QueryHF.hs @@ -0,0 +1,142 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilyDependencies #-} +{-# LANGUAGE UndecidableInstances #-} +{-# LANGUAGE UndecidableSuperClasses #-} + +{-# OPTIONS_GHC -Wno-orphans #-} +#if __GLASGOW_HASKELL__ <= 906 +{-# OPTIONS_GHC -Wno-incomplete-patterns + -Wno-incomplete-uni-patterns + -Wno-incomplete-record-updates + -Wno-overlapping-patterns #-} +#endif + +module Ouroboros.Consensus.Cardano.QueryHF () where + +import Data.SOP.Index +import Ouroboros.Consensus.Byron.Ledger +import Ouroboros.Consensus.Byron.Node () +import Ouroboros.Consensus.Cardano.Block +import Ouroboros.Consensus.Cardano.CanHardFork +import Ouroboros.Consensus.Cardano.Ledger +import Ouroboros.Consensus.HardFork.Combinator +import Ouroboros.Consensus.Ledger.Query +import Ouroboros.Consensus.Shelley.HFEras () +import Ouroboros.Consensus.Shelley.Ledger +import Ouroboros.Consensus.Shelley.Node () +import Ouroboros.Consensus.Shelley.Protocol.Praos () + +instance CardanoHardForkConstraints c => BlockSupportsHFLedgerQuery (CardanoEras c) where + answerBlockQueryHFLookup IZ _cfg (q :: BlockQuery ByronBlock QFLookupTables result) _dlv = + case q of {} + answerBlockQueryHFLookup idx@(IS IZ) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS (IS IZ)) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS (IS (IS IZ))) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS (IS (IS (IS IZ)))) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS (IS (IS (IS (IS IZ))))) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS (IS (IS (IS (IS (IS IZ)))))) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup (IS (IS (IS (IS (IS (IS (IS idx))))))) _cfg _q _dlv = + case idx of {} + + answerBlockQueryHFTraverse IZ _cfg (q :: BlockQuery ByronBlock QFTraverseTables result) _dlv = + case q of {} + answerBlockQueryHFTraverse idx@(IS IZ) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS (IS IZ)) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS (IS (IS IZ))) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS (IS (IS (IS IZ)))) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS (IS (IS (IS (IS IZ))))) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS (IS (IS (IS (IS (IS IZ)))))) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse (IS (IS (IS (IS (IS (IS (IS idx))))))) _cfg _q _dlv = + case idx of {} + + queryLedgerGetTraversingFilter 
IZ (q :: BlockQuery ByronBlock QFTraverseTables result) = case q of {} + queryLedgerGetTraversingFilter idx@(IS IZ) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS (IS IZ)) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS (IS (IS IZ))) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS (IS (IS (IS IZ)))) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS (IS (IS (IS (IS IZ))))) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS (IS (IS (IS (IS (IS IZ)))))) q = case q of + GetUTxOByAddress addrs -> \case + ShelleyTxOut x -> filterGetUTxOByAddressOne addrs x + AllegraTxOut x -> filterGetUTxOByAddressOne addrs x + MaryTxOut x -> filterGetUTxOByAddressOne addrs x + AlonzoTxOut x -> filterGetUTxOByAddressOne addrs x + BabbageTxOut x -> filterGetUTxOByAddressOne addrs x + ConwayTxOut x -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter (IS (IS (IS (IS (IS (IS (IS idx))))))) _ = case idx of {} diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Eras.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Eras.hs index f8217118af..c1b2d8dd17 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Eras.hs +++ 
b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Eras.hs @@ -145,6 +145,9 @@ class ( Core.EraSegWits era , NoThunks (PredicateFailure (Core.EraRule "BBODY" era)) , NoThunks (Core.TranslationContext era) + , DecCBOR (SL.TxIn (EraCrypto era)) + , EncCBOR (SL.TxIn (EraCrypto era)) + ) => ShelleyBasedEra era where applyShelleyBasedTx :: diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Forge.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Forge.hs index bfcf43ee94..85dcde72a4 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Forge.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Forge.hs @@ -34,15 +34,15 @@ import Ouroboros.Consensus.Shelley.Protocol.Abstract (ProtoCrypto, -------------------------------------------------------------------------------} forgeShelleyBlock :: - forall m era proto. + forall m era proto mk. (ShelleyCompatible proto era, Monad m) => HotKey (EraCrypto era) m -> CanBeLeader proto -> TopLevelConfig (ShelleyBlock proto era) - -> BlockNo -- ^ Current block number - -> SlotNo -- ^ Current slot number - -> TickedLedgerState (ShelleyBlock proto era) -- ^ Current ledger - -> [Validated (GenTx (ShelleyBlock proto era))] -- ^ Txs to include + -> BlockNo -- ^ Current block number + -> SlotNo -- ^ Current slot number + -> TickedLedgerState (ShelleyBlock proto era) mk -- ^ Current ledger + -> [Validated (GenTx (ShelleyBlock proto era))] -- ^ Txs to include -> IsLeader proto -> m (ShelleyBlock proto era) forgeShelleyBlock diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Inspect.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Inspect.hs index d8589fb1ce..df2cc9635e 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Inspect.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Inspect.hs @@ -52,8 +52,8 @@ instance ShelleyBasedEra era => InspectLedger (ShelleyBlock proto era) where updatesAfter = pparamsUpdate after pparamsUpdate :: - forall era proto. ShelleyBasedEra era - => LedgerState (ShelleyBlock proto era) + forall era proto mk. ShelleyBasedEra era + => LedgerState (ShelleyBlock proto era) mk -> ShelleyLedgerUpdate era pparamsUpdate st = let nes = shelleyLedgerState st diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Ledger.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Ledger.hs index 380f97f562..8f3cab0ff0 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Ledger.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Ledger.hs @@ -21,11 +21,13 @@ module Ouroboros.Consensus.Shelley.Ledger.Ledger ( LedgerState (..) + , LedgerTables (..) , ShelleyBasedEra , ShelleyLedgerError (..) , ShelleyTip (..) , ShelleyTransition (..) , Ticked (..) + , Ticked1 (..) 
, castShelleyTip , shelleyLedgerTipPoint , shelleyTipToPoint @@ -45,14 +47,19 @@ module Ouroboros.Consensus.Shelley.Ledger.Ledger ( , encodeShelleyAnnTip , encodeShelleyHeaderState , encodeShelleyLedgerState + -- * Low-level UTxO manipulations + , projectUtxoSL + , withUtxoSL ) where import qualified Cardano.Ledger.BaseTypes as SL (epochInfoPure) import qualified Cardano.Ledger.BHeaderView as SL (BHeaderView) import Cardano.Ledger.Binary.Plain (FromCBOR (..), ToCBOR (..), enforceSize) +import qualified Cardano.Ledger.Block as Core import Cardano.Ledger.Core (Era, ppMaxBHSizeL, ppMaxTxSizeL) import qualified Cardano.Ledger.Core as Core +import qualified Cardano.Ledger.Crypto as Crypto import qualified Cardano.Ledger.Shelley.API as SL import qualified Cardano.Ledger.Shelley.Governance as SL import qualified Cardano.Ledger.Shelley.LedgerState as SL @@ -84,6 +91,7 @@ import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.CommonProtocolParams import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.Ledger.Util (isNewEpoch) import Ouroboros.Consensus.Shelley.Eras (EraCrypto) import Ouroboros.Consensus.Shelley.Ledger.Block @@ -91,7 +99,6 @@ import Ouroboros.Consensus.Shelley.Ledger.Config import Ouroboros.Consensus.Shelley.Ledger.Protocol () import Ouroboros.Consensus.Shelley.Protocol.Abstract (EnvelopeCheckError, envelopeChecks, mkHeaderView) -import Ouroboros.Consensus.Util ((..:)) import Ouroboros.Consensus.Util.CBOR (decodeWithOrigin, encodeWithOrigin) import Ouroboros.Consensus.Util.Versioned @@ -124,6 +131,10 @@ data ShelleyLedgerConfig era = ShelleyLedgerConfig { deriving instance (NoThunks (Core.TranslationContext era), Era era) => NoThunks (ShelleyLedgerConfig era) +deriving instance ( Crypto.Crypto (EraCrypto era) + , Show (Core.TranslationContext era) + ) => Show (ShelleyLedgerConfig era) + shelleyLedgerGenesis :: ShelleyLedgerConfig era -> SL.ShelleyGenesis (EraCrypto era) shelleyLedgerGenesis = getCompactGenesis . 
shelleyLedgerCompactGenesis @@ -198,16 +209,20 @@ castShelleyTip (ShelleyTip sn bn hh) = ShelleyTip { , shelleyTipHash = coerce hh } -data instance LedgerState (ShelleyBlock proto era) = ShelleyLedgerState { +data instance LedgerState (ShelleyBlock proto era) mk = ShelleyLedgerState { shelleyLedgerTip :: !(WithOrigin (ShelleyTip proto era)) , shelleyLedgerState :: !(SL.NewEpochState era) , shelleyLedgerTransition :: !ShelleyTransition + , shelleyLedgerTables :: !(LedgerTables (LedgerState (ShelleyBlock proto era)) mk) } deriving (Generic) -deriving instance ShelleyBasedEra era => Show (LedgerState (ShelleyBlock proto era)) -deriving instance ShelleyBasedEra era => Eq (LedgerState (ShelleyBlock proto era)) -deriving instance ShelleyBasedEra era => NoThunks (LedgerState (ShelleyBlock proto era)) +deriving instance (ShelleyBasedEra era, EqMK mk) + => Eq (LedgerState (ShelleyBlock proto era) mk) +deriving instance (ShelleyBasedEra era, NoThunksMK mk) + => NoThunks (LedgerState (ShelleyBlock proto era) mk) +deriving instance (ShelleyBasedEra era, ShowMK mk) + => Show (LedgerState (ShelleyBlock proto era) mk) -- | Information required to determine the hard fork point from Shelley to the -- next ledger @@ -234,11 +249,155 @@ newtype ShelleyTransition = ShelleyTransitionInfo { deriving stock (Eq, Show, Generic) deriving newtype (NoThunks) -shelleyLedgerTipPoint :: LedgerState (ShelleyBlock proto era) -> Point (ShelleyBlock proto era) +shelleyLedgerTipPoint :: + LedgerState (ShelleyBlock proto era) mk + -> Point (ShelleyBlock proto era) shelleyLedgerTipPoint = shelleyTipToPoint . shelleyLedgerTip instance ShelleyCompatible proto era => UpdateLedger (ShelleyBlock proto era) +type instance Key (LedgerState (ShelleyBlock proto era)) = SL.TxIn (EraCrypto era) +type instance Value (LedgerState (ShelleyBlock proto era)) = Core.TxOut era + +instance ShelleyBasedEra era + => HasLedgerTables (LedgerState (ShelleyBlock proto era)) where + projectLedgerTables = shelleyLedgerTables + withLedgerTables st tables = + ShelleyLedgerState { + shelleyLedgerTip + , shelleyLedgerState + , shelleyLedgerTransition + , shelleyLedgerTables = tables + } + where + ShelleyLedgerState { + shelleyLedgerTip + , shelleyLedgerState + , shelleyLedgerTransition + } = st + +instance ShelleyBasedEra era + => HasLedgerTables (Ticked1 (LedgerState (ShelleyBlock proto era))) where + projectLedgerTables = castLedgerTables . 
tickedShelleyLedgerTables + withLedgerTables st tables = + TickedShelleyLedgerState { + untickedShelleyLedgerTip + , tickedShelleyLedgerTransition + , tickedShelleyLedgerState + , tickedShelleyLedgerTables = castLedgerTables tables + } + where + TickedShelleyLedgerState { + untickedShelleyLedgerTip + , tickedShelleyLedgerTransition + , tickedShelleyLedgerState + } = st + +instance ShelleyBasedEra era + => CanSerializeLedgerTables (LedgerState (ShelleyBlock proto era)) where + codecLedgerTables = LedgerTables (CodecMK + (Core.toEraCBOR @era) + (Core.toEraCBOR @era) + (Core.fromEraCBOR @era) + (Core.fromEraShareCBOR @era)) + +instance ShelleyBasedEra era + => CanStowLedgerTables (LedgerState (ShelleyBlock proto era)) where + stowLedgerTables st = + ShelleyLedgerState { + shelleyLedgerTip = shelleyLedgerTip + , shelleyLedgerState = + shelleyLedgerState `withUtxoSL` getLedgerTables shelleyLedgerTables + , shelleyLedgerTransition = shelleyLedgerTransition + , shelleyLedgerTables = emptyLedgerTables + } + where + ShelleyLedgerState { + shelleyLedgerTip + , shelleyLedgerState + , shelleyLedgerTransition + , shelleyLedgerTables + } = st + unstowLedgerTables st = + ShelleyLedgerState { + shelleyLedgerTip = shelleyLedgerTip + , shelleyLedgerState = + shelleyLedgerState `withUtxoSL` emptyMK + , shelleyLedgerTransition = shelleyLedgerTransition + , shelleyLedgerTables = + LedgerTables $ projectUtxoSL shelleyLedgerState + } + where + ShelleyLedgerState { + shelleyLedgerTip + , shelleyLedgerState + , shelleyLedgerTransition + } = st + +instance ShelleyBasedEra era + => CanStowLedgerTables (Ticked1 (LedgerState (ShelleyBlock proto era))) where + stowLedgerTables st = + TickedShelleyLedgerState { + untickedShelleyLedgerTip = untickedShelleyLedgerTip + , tickedShelleyLedgerTransition = tickedShelleyLedgerTransition + , tickedShelleyLedgerState = + tickedShelleyLedgerState `withUtxoSL` getLedgerTables tickedShelleyLedgerTables + , tickedShelleyLedgerTables = emptyLedgerTables + } + where + TickedShelleyLedgerState { + untickedShelleyLedgerTip + , tickedShelleyLedgerTransition + , tickedShelleyLedgerState + , tickedShelleyLedgerTables + } = st + + unstowLedgerTables st = + TickedShelleyLedgerState { + untickedShelleyLedgerTip = untickedShelleyLedgerTip + , tickedShelleyLedgerTransition = tickedShelleyLedgerTransition + , tickedShelleyLedgerState = + tickedShelleyLedgerState `withUtxoSL` emptyMK + , tickedShelleyLedgerTables = + LedgerTables $ projectUtxoSL tickedShelleyLedgerState + } + where + TickedShelleyLedgerState { + untickedShelleyLedgerTip + , tickedShelleyLedgerTransition + , tickedShelleyLedgerState + } = st + +projectUtxoSL :: + SL.NewEpochState era + -> ValuesMK (SL.TxIn (EraCrypto era)) (Core.TxOut era) +projectUtxoSL = + ValuesMK + . SL.unUTxO + . SL.utxosUtxo + . SL.lsUTxOState + . SL.esLState + . SL.nesEs + +withUtxoSL :: + SL.NewEpochState era + -> ValuesMK (SL.TxIn (EraCrypto era)) (Core.TxOut era) + -> SL.NewEpochState era +withUtxoSL nes (ValuesMK m) = + nes { + SL.nesEs = es { + SL.esLState = us { + SL.lsUTxOState = utxo { + SL.utxosUtxo = SL.UTxO m + } + } + } + } + where + es = SL.nesEs nes + us = SL.esLState es + utxo = SL.lsUTxOState us + {------------------------------------------------------------------------------- GetTip -------------------------------------------------------------------------------} @@ -246,7 +405,7 @@ instance ShelleyCompatible proto era => UpdateLedger (ShelleyBlock proto era) instance GetTip (LedgerState (ShelleyBlock proto era)) where getTip = castPoint . 
shelleyLedgerTipPoint -instance GetTip (Ticked (LedgerState (ShelleyBlock proto era))) where +instance GetTip (Ticked1 (LedgerState (ShelleyBlock proto era))) where getTip = castPoint . untickedShelleyLedgerTipPoint {------------------------------------------------------------------------------- @@ -254,7 +413,7 @@ instance GetTip (Ticked (LedgerState (ShelleyBlock proto era))) where -------------------------------------------------------------------------------} -- | Ticking only affects the state itself -data instance Ticked (LedgerState (ShelleyBlock proto era)) = TickedShelleyLedgerState { +data instance Ticked1 (LedgerState (ShelleyBlock proto era)) mk = TickedShelleyLedgerState { untickedShelleyLedgerTip :: !(WithOrigin (ShelleyTip proto era)) -- | We are counting blocks within an epoch, this means: -- @@ -263,14 +422,13 @@ data instance Ticked (LedgerState (ShelleyBlock proto era)) = TickedShelleyLedge -- must be reset when /ticking/, not when applying a block. , tickedShelleyLedgerTransition :: !ShelleyTransition , tickedShelleyLedgerState :: !(SL.NewEpochState era) + , tickedShelleyLedgerTables :: + !(LedgerTables (LedgerState (ShelleyBlock proto era)) mk) } deriving (Generic) -deriving instance ShelleyBasedEra era - => NoThunks (Ticked (LedgerState (ShelleyBlock proto era))) - untickedShelleyLedgerTipPoint :: - Ticked (LedgerState (ShelleyBlock proto era)) + TickedLedgerState (ShelleyBlock proto era) mk -> Point (ShelleyBlock proto era) untickedShelleyLedgerTipPoint = shelleyTipToPoint . untickedShelleyLedgerTip @@ -286,15 +444,15 @@ instance ShelleyBasedEra era => IsLedger (LedgerState (ShelleyBlock proto era)) } = swizzle appTick <&> \l' -> TickedShelleyLedgerState { - untickedShelleyLedgerTip = - shelleyLedgerTip + untickedShelleyLedgerTip = shelleyLedgerTip , tickedShelleyLedgerTransition = -- The voting resets each epoch if isNewEpoch ei (shelleyTipSlotNo <$> shelleyLedgerTip) slotNo then ShelleyTransitionInfo { shelleyAfterVoting = 0 } else shelleyLedgerTransition - , tickedShelleyLedgerState = l' + , tickedShelleyLedgerState = l' + , tickedShelleyLedgerTables = emptyLedgerTables } where globals = shelleyLedgerGlobals cfg @@ -378,6 +536,12 @@ instance ShelleyCompatible proto era , asoEvents = STS.EPReturn } + getBlockKeySets = + LedgerTables + . KeysMK + . Core.neededTxInsForBlock + . shelleyBlockRaw + data ShelleyReapplyException = forall era. Show (SL.BlockTransitionError era) => ShelleyReapplyException (SL.BlockTransitionError era) @@ -388,7 +552,7 @@ instance Show ShelleyReapplyException where instance Exception.Exception ShelleyReapplyException where applyHelper :: - (ShelleyCompatible proto era, Monad m) + forall proto m era. 
(ShelleyCompatible proto era, Monad m) => ( SL.Globals -> SL.NewEpochState era -> SL.Block (SL.BHeaderView (EraCrypto era)) era @@ -399,14 +563,16 @@ applyHelper :: ) -> LedgerConfig (ShelleyBlock proto era) -> ShelleyBlock proto era - -> Ticked (LedgerState (ShelleyBlock proto era)) + -> TickedLedgerState (ShelleyBlock proto era) ValuesMK -> m (LedgerResult (LedgerState (ShelleyBlock proto era)) - (LedgerState (ShelleyBlock proto era))) -applyHelper f cfg blk TickedShelleyLedgerState{ - tickedShelleyLedgerTransition - , tickedShelleyLedgerState - } = do + (LedgerState (ShelleyBlock proto era) DiffMK)) +applyHelper f cfg blk stBefore = do + let TickedShelleyLedgerState{ + tickedShelleyLedgerTransition + , tickedShelleyLedgerState + } = stowLedgerTables stBefore + ledgerResult <- f globals @@ -420,22 +586,31 @@ applyHelper f cfg blk TickedShelleyLedgerState{ in SL.UnsafeUnserialisedBlock h' (SL.bbody b) ) - return $ ledgerResult <&> \newNewEpochState -> ShelleyLedgerState { - shelleyLedgerTip = NotOrigin ShelleyTip { - shelleyTipBlockNo = blockNo blk - , shelleyTipSlotNo = blockSlot blk - , shelleyTipHash = blockHash blk - } - , shelleyLedgerState = - newNewEpochState - , shelleyLedgerTransition = ShelleyTransitionInfo { - shelleyAfterVoting = - -- We count the number of blocks that have been applied after the - -- voting deadline has passed. - (if blockSlot blk >= votingDeadline then succ else id) $ - shelleyAfterVoting tickedShelleyLedgerTransition - } - } + let track :: + LedgerState (ShelleyBlock proto era) ValuesMK + -> LedgerState (ShelleyBlock proto era) TrackingMK + track = calculateDifference stBefore + + + return $ ledgerResult <&> \newNewEpochState -> + forgetTrackingValues $ track $ unstowLedgerTables $ + ShelleyLedgerState { + shelleyLedgerTip = NotOrigin ShelleyTip { + shelleyTipBlockNo = blockNo blk + , shelleyTipSlotNo = blockSlot blk + , shelleyTipHash = blockHash blk + } + , shelleyLedgerState = + newNewEpochState + , shelleyLedgerTransition = ShelleyTransitionInfo { + shelleyAfterVoting = + -- We count the number of blocks that have been applied after the + -- voting deadline has passed. + (if blockSlot blk >= votingDeadline then succ else id) $ + shelleyAfterVoting tickedShelleyLedgerTransition + } + , shelleyLedgerTables = emptyLedgerTables + } where globals = shelleyLedgerGlobals cfg swindow = SL.stabilityWindow globals @@ -556,7 +731,7 @@ decodeShelleyTransition = do encodeShelleyLedgerState :: ShelleyCompatible proto era - => LedgerState (ShelleyBlock proto era) + => LedgerState (ShelleyBlock proto era) EmptyMK -> Encoding encodeShelleyLedgerState ShelleyLedgerState { shelleyLedgerTip @@ -572,12 +747,12 @@ encodeShelleyLedgerState decodeShelleyLedgerState :: forall era proto s. 
ShelleyCompatible proto era - => Decoder s (LedgerState (ShelleyBlock proto era)) + => Decoder s (LedgerState (ShelleyBlock proto era) EmptyMK) decodeShelleyLedgerState = decodeVersion [ (serialisationFormatVersion2, Decode decodeShelleyLedgerState2) ] where - decodeShelleyLedgerState2 :: Decoder s' (LedgerState (ShelleyBlock proto era)) + decodeShelleyLedgerState2 :: Decoder s' (LedgerState (ShelleyBlock proto era) EmptyMK) decodeShelleyLedgerState2 = do enforceSize "LedgerState ShelleyBlock" 3 shelleyLedgerTip <- decodeWithOrigin decodeShelleyTip @@ -587,4 +762,5 @@ decodeShelleyLedgerState = decodeVersion [ shelleyLedgerTip , shelleyLedgerState , shelleyLedgerTransition + , shelleyLedgerTables = emptyLedgerTables } diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Mempool.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Mempool.hs index 9c4ff4cac8..a109e2fd48 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Mempool.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Mempool.hs @@ -55,6 +55,7 @@ import qualified Cardano.Ledger.Conway.Rules as SL import qualified Cardano.Ledger.Conway.UTxO as SL import qualified Cardano.Ledger.Core as SL (txIdTxBody) import Cardano.Ledger.Crypto (Crypto) +import qualified Cardano.Ledger.Era as SL (getAllTxInputs) import qualified Cardano.Ledger.SafeHash as SL import qualified Cardano.Ledger.Shelley.API as SL import qualified Cardano.Ledger.Shelley.Rules as ShelleyEra @@ -74,11 +75,12 @@ import NoThunks.Class (NoThunks (..)) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Shelley.Eras import Ouroboros.Consensus.Shelley.Ledger.Block import Ouroboros.Consensus.Shelley.Ledger.Ledger (ShelleyLedgerConfig (shelleyLedgerGlobals), - Ticked (TickedShelleyLedgerState, tickedShelleyLedgerState), + Ticked1 (TickedShelleyLedgerState, tickedShelleyLedgerState), getPParams) import Ouroboros.Consensus.Util (ShowProxy (..)) import Ouroboros.Consensus.Util.Condense @@ -149,6 +151,11 @@ instance (ShelleyCompatible proto era, TxLimits (ShelleyBlock proto era)) txForgetValidated (ShelleyValidatedTx txid vtx) = ShelleyTx txid (SL.extractTx vtx) + getTransactionKeySets (ShelleyTx _ tx) = + LedgerTables + $ KeysMK + $ SL.getAllTxInputs (tx ^. bodyTxL) + mkShelleyTx :: forall era proto. ShelleyBasedEra era => Tx era -> GenTx (ShelleyBlock proto era) mkShelleyTx tx = ShelleyTx (SL.txIdTxBody @era (tx ^. bodyTxL)) tx @@ -227,12 +234,18 @@ applyShelleyTx :: forall era proto. 
-> WhetherToIntervene -> SlotNo -> GenTx (ShelleyBlock proto era) - -> TickedLedgerState (ShelleyBlock proto era) + -> TickedLedgerState (ShelleyBlock proto era) ValuesMK -> Except (ApplyTxErr (ShelleyBlock proto era)) - ( TickedLedgerState (ShelleyBlock proto era) + ( TickedLedgerState (ShelleyBlock proto era) DiffMK , Validated (GenTx (ShelleyBlock proto era)) ) -applyShelleyTx cfg wti slot (ShelleyTx _ tx) st = do +applyShelleyTx cfg wti slot (ShelleyTx _ tx) st0 = do + let st1 :: TickedLedgerState (ShelleyBlock proto era) EmptyMK + st1 = stowLedgerTables st0 + + innerSt :: SL.NewEpochState era + innerSt = tickedShelleyLedgerState st1 + (mempoolState', vtx) <- applyShelleyBasedTx (shelleyLedgerGlobals cfg) @@ -241,20 +254,25 @@ applyShelleyTx cfg wti slot (ShelleyTx _ tx) st = do wti tx - let st' = set theLedgerLens mempoolState' st + let st' :: TickedLedgerState (ShelleyBlock proto era) DiffMK + st' = forgetTrackingValues + $ calculateDifference st0 + $ unstowLedgerTables + $ set theLedgerLens mempoolState' st1 pure (st', mkShelleyValidatedTx vtx) - where - innerSt = tickedShelleyLedgerState st reapplyShelleyTx :: ShelleyBasedEra era => LedgerConfig (ShelleyBlock proto era) -> SlotNo -> Validated (GenTx (ShelleyBlock proto era)) - -> TickedLedgerState (ShelleyBlock proto era) - -> Except (ApplyTxErr (ShelleyBlock proto era)) (TickedLedgerState (ShelleyBlock proto era)) -reapplyShelleyTx cfg slot vgtx st = do + -> TickedLedgerState (ShelleyBlock proto era) ValuesMK + -> Except (ApplyTxErr (ShelleyBlock proto era)) (TickedLedgerState (ShelleyBlock proto era) ValuesMK) +reapplyShelleyTx cfg slot vgtx st0 = do + let st1 = stowLedgerTables st0 + innerSt = tickedShelleyLedgerState st1 + mempoolState' <- SL.reapplyTx (shelleyLedgerGlobals cfg) @@ -262,12 +280,13 @@ reapplyShelleyTx cfg slot vgtx st = do (SL.mkMempoolState innerSt) vtx - pure $ set theLedgerLens mempoolState' st + let st2 = unstowLedgerTables + $ set theLedgerLens mempoolState' st1 + + pure st2 where ShelleyValidatedTx _txid vtx = vgtx - innerSt = tickedShelleyLedgerState st - -- | The lens combinator set :: (forall f. Applicative f => (a -> f b) -> s -> f t) @@ -278,8 +297,8 @@ set lens inner outer = theLedgerLens :: Functor f => (SL.LedgerState era -> f (SL.LedgerState era)) - -> TickedLedgerState (ShelleyBlock proto era) - -> f (TickedLedgerState (ShelleyBlock proto era)) + -> TickedLedgerState (ShelleyBlock proto era) mk + -> f (TickedLedgerState (ShelleyBlock proto era) mk) theLedgerLens f x = (\y -> x{tickedShelleyLedgerState = y}) <$> SL.overNewEpochState f (tickedShelleyLedgerState x) @@ -310,7 +329,7 @@ runValidation = liftEither . (unTxErrorSG +++ id) . V.toEither txsMaxBytes :: ShelleyCompatible proto era - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) mk -> IgnoringOverflow ByteSize32 txsMaxBytes TickedShelleyLedgerState { tickedShelleyLedgerState } = -- `maxBlockBodySize` is expected to be bigger than `fixedBlockBodyOverhead` @@ -322,7 +341,7 @@ txsMaxBytes TickedShelleyLedgerState { tickedShelleyLedgerState } = txInBlockSize :: (ShelleyCompatible proto era, MaxTxSizeUTxO era) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) mk -> GenTx (ShelleyBlock proto era) -> V.Validation (TxErrorSG era) (IgnoringOverflow ByteSize32) txInBlockSize st (ShelleyTx _txid tx') = @@ -423,9 +442,9 @@ fromExUnits :: ExUnits -> ExUnits' Natural fromExUnits = unWrapExUnits blockCapacityAlonzoMeasure :: - forall proto era. 
+ forall proto era mk. (ShelleyCompatible proto era, L.AlonzoEraPParams era) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) mk -> AlonzoMeasure blockCapacityAlonzoMeasure ledgerState = AlonzoMeasure { @@ -443,7 +462,7 @@ txMeasureAlonzo :: , ExUnitsTooBigUTxO era , MaxTxSizeUTxO era ) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) ValuesMK -> GenTx (ShelleyBlock proto era) -> V.Validation (TxErrorSG era) AlonzoMeasure txMeasureAlonzo st tx@(ShelleyTx _txid tx') = @@ -510,11 +529,11 @@ instance HasByteSize ConwayMeasure where txMeasureByteSize = txMeasureByteSize . alonzoMeasure blockCapacityConwayMeasure :: - forall proto era. + forall proto era mk. ( ShelleyCompatible proto era , L.AlonzoEraPParams era ) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) mk -> ConwayMeasure blockCapacityConwayMeasure st = ConwayMeasure { @@ -533,7 +552,7 @@ txMeasureConway :: , MaxTxSizeUTxO era , TxRefScriptsSizeTooBig era ) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) ValuesMK -> GenTx (ShelleyBlock proto era) -> V.Validation (TxErrorSG era) ConwayMeasure txMeasureConway st tx@(ShelleyTx _txid tx') = @@ -568,7 +587,7 @@ txMeasureBabbage :: , ExUnitsTooBigUTxO era , MaxTxSizeUTxO era ) - => TickedLedgerState (ShelleyBlock proto era) + => TickedLedgerState (ShelleyBlock proto era) ValuesMK -> GenTx (ShelleyBlock proto era) -> V.Validation (TxErrorSG era) ConwayMeasure txMeasureBabbage st tx@(ShelleyTx _txid tx') = diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Query.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Query.hs index 3be3c96596..2c55483d80 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Query.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/Query.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE BangPatterns #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} @@ -6,6 +7,7 @@ {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RankNTypes #-} @@ -14,6 +16,7 @@ {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -28,16 +31,22 @@ module Ouroboros.Consensus.Shelley.Ledger.Query ( , decodeShelleyResult , encodeShelleyQuery , encodeShelleyResult + -- * BlockSupportsHFLedgerQuery instances + , answerShelleyLookupQueries + , answerShelleyTraversingQueries + , filterGetUTxOByAddressOne ) where import Cardano.Binary (FromCBOR (..), ToCBOR (..), encodeListLen, enforceSize) +import Cardano.Ledger.Address import qualified Cardano.Ledger.Api.State.Query as SL import Cardano.Ledger.CertState (lookupDepositDState) import qualified Cardano.Ledger.CertState as SL import Cardano.Ledger.Coin (Coin) import Cardano.Ledger.Compactible (Compactible (fromCompact)) import qualified Cardano.Ledger.Conway.Governance as CG +import qualified Cardano.Ledger.Core as SL import Cardano.Ledger.Credential (StakeCredential) import Cardano.Ledger.Crypto (Crypto) import qualified Cardano.Ledger.EpochBoundary as SL @@ -59,25 +68,30 @@ import qualified Codec.CBOR.Encoding as CBOR import 
Codec.Serialise (decode, encode) import Control.DeepSeq (NFData) import Data.Bifunctor (second) -import Data.Kind (Type) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (fromMaybe) import Data.Sequence (Seq (..)) import Data.Set (Set) import qualified Data.Set as Set -import Data.Type.Equality (apply) +import Data.SOP.Index import Data.Typeable (Typeable) import qualified Data.VMap as VMap import GHC.Generics (Generic) +import Lens.Micro import Lens.Micro.Extras (view) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config +import Ouroboros.Consensus.HardFork.Combinator.Abstract.CanHardFork +import Ouroboros.Consensus.HardFork.Combinator.Basics +import Ouroboros.Consensus.HardFork.Combinator.Ledger +import Ouroboros.Consensus.HardFork.Combinator.Ledger.Query import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsPeerSelection +import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Protocol.Abstract (ChainDepState) import Ouroboros.Consensus.Shelley.Eras (EraCrypto) import qualified Ouroboros.Consensus.Shelley.Eras as SE @@ -90,14 +104,17 @@ import Ouroboros.Consensus.Shelley.Ledger.PeerSelection () import Ouroboros.Consensus.Shelley.Ledger.Query.PParamsLegacyEncoder import Ouroboros.Consensus.Shelley.Ledger.Query.Types import Ouroboros.Consensus.Shelley.Protocol.Abstract (ProtoCrypto) +import Ouroboros.Consensus.Storage.LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import Ouroboros.Consensus.Util (ShowProxy (..)) +import Ouroboros.Consensus.Util.IOLike (MonadSTM (atomically)) import Ouroboros.Network.Block (Serialised (..), decodePoint, encodePoint, mkSerialised) import Ouroboros.Network.PeerSelection.LedgerPeers.Type import Ouroboros.Network.PeerSelection.LedgerPeers.Utils {------------------------------------------------------------------------------- - QueryLedger + BlockSupportsLedgerQuery -------------------------------------------------------------------------------} newtype NonMyopicMemberRewards c = NonMyopicMemberRewards { @@ -116,18 +133,18 @@ type VoteDelegatees c = Map (SL.Credential 'SL.Staking c) (SL.DRep c) -data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where - GetLedgerTip :: BlockQuery (ShelleyBlock proto era) (Point (ShelleyBlock proto era)) - GetEpochNo :: BlockQuery (ShelleyBlock proto era) EpochNo +data instance BlockQuery (ShelleyBlock proto era) fp result where + GetLedgerTip :: BlockQuery (ShelleyBlock proto era) QFNoTables (Point (ShelleyBlock proto era)) + GetEpochNo :: BlockQuery (ShelleyBlock proto era) QFNoTables EpochNo -- | Calculate the Non-Myopic Pool Member Rewards for a set of -- credentials. 
See 'SL.getNonMyopicMemberRewards' GetNonMyopicMemberRewards :: Set (Either SL.Coin (SL.Credential 'SL.Staking (EraCrypto era))) - -> BlockQuery (ShelleyBlock proto era) (NonMyopicMemberRewards (EraCrypto era)) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (NonMyopicMemberRewards (EraCrypto era)) GetCurrentPParams - :: BlockQuery (ShelleyBlock proto era) (LC.PParams era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (LC.PParams era) GetProposedPParamsUpdates - :: BlockQuery (ShelleyBlock proto era) (SL.ProposedPPUpdates era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (SL.ProposedPPUpdates era) -- | This gets the stake distribution, but not in terms of _active_ stake -- (which we need for the leader schedule), but rather in terms of _total_ -- stake, which is relevant for rewards. It is used by the wallet to show @@ -135,7 +152,7 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where -- an endpoint that provides all the information that the wallet wants about -- pools, in an extensible fashion. GetStakeDistribution - :: BlockQuery (ShelleyBlock proto era) (PoolDistr (EraCrypto era)) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (PoolDistr (EraCrypto era)) -- | Get a subset of the UTxO, filtered by address. Although this will -- typically return a lot less data than 'GetUTxOWhole', it requires a linear @@ -145,18 +162,18 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where -- GetUTxOByAddress :: Set (SL.Addr (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (SL.UTxO era) + -> BlockQuery (ShelleyBlock proto era) QFTraverseTables (SL.UTxO era) -- | Get the /entire/ UTxO. This is only suitable for debug/testing purposes -- because otherwise it is far too much data. -- GetUTxOWhole - :: BlockQuery (ShelleyBlock proto era) (SL.UTxO era) + :: BlockQuery (ShelleyBlock proto era) QFTraverseTables (SL.UTxO era) -- | Only for debugging purposes, we make no effort to ensure binary -- compatibility (cf the comment on 'GetCBOR'). Moreover, it is huge. DebugEpochState - :: BlockQuery (ShelleyBlock proto era) (SL.EpochState era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (SL.EpochState era) -- | Wrap the result of the query using CBOR-in-CBOR. -- @@ -172,82 +189,81 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where -- decode it, the client can fall back to pretty printing the actual CBOR, -- which is better than no output at all. GetCBOR - :: BlockQuery (ShelleyBlock proto era) result - -> BlockQuery (ShelleyBlock proto era) (Serialised result) + :: BlockQuery (ShelleyBlock proto era) fp result + -> BlockQuery (ShelleyBlock proto era) fp (Serialised result) GetFilteredDelegationsAndRewardAccounts :: Set (SL.Credential 'SL.Staking (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Delegations (EraCrypto era), SL.RewardAccounts (EraCrypto era)) GetGenesisConfig - :: BlockQuery (ShelleyBlock proto era) (CompactGenesis (EraCrypto era)) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (CompactGenesis (EraCrypto era)) -- | Only for debugging purposes, we make no effort to ensure binary -- compatibility (cf the comment on 'GetCBOR'). Moreover, it is huge. DebugNewEpochState - :: BlockQuery (ShelleyBlock proto era) (SL.NewEpochState era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (SL.NewEpochState era) -- | Only for debugging purposes, we make no effort to ensure binary -- compatibility (cf the comment on 'GetCBOR'). 
DebugChainDepState - :: BlockQuery (ShelleyBlock proto era) (ChainDepState proto) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (ChainDepState proto) GetRewardProvenance - :: BlockQuery (ShelleyBlock proto era) (SL.RewardProvenance (EraCrypto era)) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (SL.RewardProvenance (EraCrypto era)) -- | Get a subset of the UTxO, filtered by transaction input. This is -- efficient and costs only O(m * log n) for m inputs and a UTxO of size n. -- GetUTxOByTxIn :: Set (SL.TxIn (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (SL.UTxO era) + -> BlockQuery (ShelleyBlock proto era) QFLookupTables (SL.UTxO era) GetStakePools - :: BlockQuery (ShelleyBlock proto era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (Set (SL.KeyHash 'SL.StakePool (EraCrypto era))) GetStakePoolParams :: Set (SL.KeyHash 'SL.StakePool (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Map (SL.KeyHash 'SL.StakePool (EraCrypto era)) (SL.PoolParams (EraCrypto era))) GetRewardInfoPools - :: BlockQuery (ShelleyBlock proto era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (SL.RewardParams, - Map (SL.KeyHash 'SL.StakePool (EraCrypto era)) - (SL.RewardInfoPool)) + Map (SL.KeyHash 'SL.StakePool (EraCrypto era)) SL.RewardInfoPool) GetPoolState :: Maybe (Set (SL.KeyHash 'SL.StakePool (EraCrypto era))) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (SL.PState era) GetStakeSnapshots :: Maybe (Set (SL.KeyHash 'SL.StakePool (EraCrypto era))) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (StakeSnapshots (EraCrypto era)) GetPoolDistr :: Maybe (Set (SL.KeyHash 'SL.StakePool (EraCrypto era))) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (PoolDistr (EraCrypto era)) GetStakeDelegDeposits :: Set (StakeCredential (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Map (StakeCredential (EraCrypto era)) Coin) -- | Not supported in eras before Conway GetConstitution :: CG.ConwayEraGov era - => BlockQuery (ShelleyBlock proto era) (CG.Constitution era) + => BlockQuery (ShelleyBlock proto era) QFNoTables (CG.Constitution era) -- | Although this query was introduced as part of Conway, it is general and -- so has non-degenerate semantics for eras before Conway. GetGovState - :: BlockQuery (ShelleyBlock proto era) (LC.GovState era) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (LC.GovState era) -- | The argument specifies the credential of each 'DRep' whose state should -- be returned. When it's empty, the state of every 'DRep' is returned. 
@@ -257,6 +273,7 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where :: CG.ConwayEraGov era => Set (SL.Credential 'DRepRole (EraCrypto era)) -> BlockQuery (ShelleyBlock proto era) + QFNoTables (Map (SL.Credential 'DRepRole (EraCrypto era)) (SL.DRepState (EraCrypto era)) @@ -273,7 +290,7 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where GetDRepStakeDistr :: CG.ConwayEraGov era => Set (SL.DRep (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (Map (SL.DRep (EraCrypto era)) Coin) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Map (SL.DRep (EraCrypto era)) Coin) -- | Query committee members -- @@ -283,16 +300,16 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where => Set (SL.Credential 'ColdCommitteeRole (EraCrypto era) ) -> Set (SL.Credential 'HotCommitteeRole (EraCrypto era)) -> Set SL.MemberStatus - -> BlockQuery (ShelleyBlock proto era) (SL.CommitteeMembersState (EraCrypto era)) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (SL.CommitteeMembersState (EraCrypto era)) -- | Not supported in eras before Conway. GetFilteredVoteDelegatees :: CG.ConwayEraGov era => Set (SL.Credential 'SL.Staking (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (VoteDelegatees (EraCrypto era)) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (VoteDelegatees (EraCrypto era)) GetAccountState - :: BlockQuery (ShelleyBlock proto era) AccountState + :: BlockQuery (ShelleyBlock proto era) QFNoTables AccountState -- | Query the SPO voting stake distribution. -- This stake distribution is different from the one used in leader election. @@ -303,26 +320,26 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where GetSPOStakeDistr :: CG.ConwayEraGov era => Set (KeyHash 'StakePool (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (Map (KeyHash 'StakePool (EraCrypto era)) Coin) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Map (KeyHash 'StakePool (EraCrypto era)) Coin) GetProposals :: CG.ConwayEraGov era => Set (CG.GovActionId (EraCrypto era)) - -> BlockQuery (ShelleyBlock proto era) (Seq (CG.GovActionState era)) + -> BlockQuery (ShelleyBlock proto era) QFNoTables (Seq (CG.GovActionState era)) GetRatifyState :: CG.ConwayEraGov era - => BlockQuery (ShelleyBlock proto era) (CG.RatifyState era) + => BlockQuery (ShelleyBlock proto era) QFNoTables (CG.RatifyState era) GetFuturePParams - :: BlockQuery (ShelleyBlock proto era) (Maybe (LC.PParams era)) + :: BlockQuery (ShelleyBlock proto era) QFNoTables (Maybe (LC.PParams era)) -- | Obtain a snapshot of big ledger peers. CLI can serialize these, -- and if made available to the node by topology configuration, -- the diffusion layer can use these peers when syncing up from scratch -- or stale ledger state - especially useful for Genesis mode GetBigLedgerPeerSnapshot - :: BlockQuery (ShelleyBlock proto era) LedgerPeerSnapshot + :: BlockQuery (ShelleyBlock proto era) QFNoTables LedgerPeerSnapshot -- WARNING: please add new queries to the end of the list and stick to this -- order in all other pattern matches on queries. 
This helps in particular @@ -341,9 +358,12 @@ data instance BlockQuery (ShelleyBlock proto era) :: Type -> Type where instance (Typeable era, Typeable proto) => ShowProxy (BlockQuery (ShelleyBlock proto era)) where -instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) +instance ( ShelleyCompatible proto era + , LedgerSupportsProtocol (ShelleyBlock proto era) + , ProtoCrypto proto ~ crypto + ) => BlockSupportsLedgerQuery (ShelleyBlock proto era) where - answerBlockQuery cfg query ext = + answerPureBlockQuery cfg query ext = case query of GetLedgerTip -> shelleyLedgerTipPoint lst @@ -358,10 +378,6 @@ instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) getProposedPPUpdates st GetStakeDistribution -> fromLedgerPoolDistr $ SL.poolsByTotalStakeFraction globals st - GetUTxOByAddress addrs -> - SL.getFilteredUTxO st addrs - GetUTxOWhole -> - SL.getUTxO st DebugEpochState -> getEpochState st GetCBOR query' -> @@ -370,7 +386,7 @@ instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) -- both client and server are running the same version; cf. the -- @GetCBOR@ Haddocks. mkSerialised (encodeShelleyResult maxBound query') $ - answerBlockQuery cfg query' ext + answerPureBlockQuery cfg query' ext GetFilteredDelegationsAndRewardAccounts creds -> getFilteredDelegationsAndRewardAccounts st creds GetGenesisConfig -> @@ -381,8 +397,6 @@ instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) headerStateChainDep hst GetRewardProvenance -> snd $ SL.getRewardProvenance globals st - GetUTxOByTxIn txins -> - SL.getUTxOSubset st txins GetStakePools -> SL.getPools st GetStakePoolParams poolids -> @@ -419,7 +433,7 @@ instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) totalGoByPoolId = SL.sumStakePerPool (SL.ssDelegations ssStakeGo) (SL.ssStake ssStakeGo) getPoolStakes :: Set (KeyHash 'StakePool crypto) -> Map (KeyHash 'StakePool crypto) (StakeSnapshot crypto) - getPoolStakes poolIds = Map.fromSet mkStakeSnapshot poolIds + getPoolStakes = Map.fromSet mkStakeSnapshot where mkStakeSnapshot poolId = StakeSnapshot { ssMarkPool = Map.findWithDefault mempty poolId totalMarkByPoolId , ssSetPool = Map.findWithDefault mempty poolId totalSetByPoolId @@ -505,160 +519,204 @@ instance (ShelleyCompatible proto era, ProtoCrypto proto ~ crypto) hst = headerState ext st = shelleyLedgerState lst -instance SameDepIndex (BlockQuery (ShelleyBlock proto era)) where - sameDepIndex GetLedgerTip GetLedgerTip + answerBlockQueryLookup cfg qry forker = case qry of + GetUTxOByTxIn ks -> do + values <- LedgerDB.roforkerReadTables forker $ LedgerTables $ KeysMK ks + flip SL.getUTxOSubset ks + . shelleyLedgerState + . ledgerState + . stowLedgerTables + . 
flip withLedgerTables values + <$> atomically (LedgerDB.roforkerGetLedgerState forker) + GetCBOR qry' -> + mkSerialised (encodeShelleyResult maxBound qry') <$> + answerBlockQueryLookup cfg qry' forker + + answerBlockQueryTraverse cfg qry forker = case qry of + GetUTxOByAddress addrs -> loop (filterGetUTxOByAddressOne addrs) NoPreviousQuery emptyUtxo + GetUTxOWhole -> loop (const True) NoPreviousQuery emptyUtxo + GetCBOR q' -> + mkSerialised (encodeShelleyResult maxBound q') <$> + answerBlockQueryTraverse cfg q' forker + where + emptyUtxo = SL.UTxO Map.empty + + combUtxo (SL.UTxO l) vs = SL.UTxO $ Map.union l vs + + partial :: + (Value (LedgerState (ShelleyBlock proto era)) -> Bool) + -> LedgerTables (ExtLedgerState (ShelleyBlock proto era)) ValuesMK + -> Map (SL.TxIn (EraCrypto era)) (LC.TxOut era) + partial queryPredicate (LedgerTables (ValuesMK vs)) = + Map.filter queryPredicate vs + + f :: ValuesMK k v -> Bool + f (ValuesMK vs) = Map.null vs + + toKey (LedgerTables (ValuesMK vs)) = fst $ Map.findMax vs + + loop queryPredicate !prev !acc = do + extValues <- LedgerDB.roforkerRangeReadTables forker prev + if ltcollapse $ ltmap (K2 . f) extValues + then pure acc + else loop queryPredicate + (PreviousQueryWasUpTo $ toKey extValues) + (combUtxo acc $ partial queryPredicate extValues) + +instance SameDepIndex2 (BlockQuery (ShelleyBlock proto era)) where + sameDepIndex2 GetLedgerTip GetLedgerTip = Just Refl - sameDepIndex GetLedgerTip _ + sameDepIndex2 GetLedgerTip _ = Nothing - sameDepIndex GetEpochNo GetEpochNo + sameDepIndex2 GetEpochNo GetEpochNo = Just Refl - sameDepIndex GetEpochNo _ + sameDepIndex2 GetEpochNo _ = Nothing - sameDepIndex (GetNonMyopicMemberRewards creds) (GetNonMyopicMemberRewards creds') + sameDepIndex2 (GetNonMyopicMemberRewards creds) (GetNonMyopicMemberRewards creds') | creds == creds' = Just Refl | otherwise = Nothing - sameDepIndex (GetNonMyopicMemberRewards _) _ + sameDepIndex2 (GetNonMyopicMemberRewards _) _ = Nothing - sameDepIndex GetCurrentPParams GetCurrentPParams + sameDepIndex2 GetCurrentPParams GetCurrentPParams = Just Refl - sameDepIndex GetCurrentPParams _ + sameDepIndex2 GetCurrentPParams _ = Nothing - sameDepIndex GetProposedPParamsUpdates GetProposedPParamsUpdates + sameDepIndex2 GetProposedPParamsUpdates GetProposedPParamsUpdates = Just Refl - sameDepIndex GetProposedPParamsUpdates _ + sameDepIndex2 GetProposedPParamsUpdates _ = Nothing - sameDepIndex GetStakeDistribution GetStakeDistribution + sameDepIndex2 GetStakeDistribution GetStakeDistribution = Just Refl - sameDepIndex GetStakeDistribution _ + sameDepIndex2 GetStakeDistribution _ = Nothing - sameDepIndex (GetUTxOByAddress addrs) (GetUTxOByAddress addrs') + sameDepIndex2 (GetUTxOByAddress addrs) (GetUTxOByAddress addrs') | addrs == addrs' = Just Refl | otherwise = Nothing - sameDepIndex (GetUTxOByAddress _) _ + sameDepIndex2 (GetUTxOByAddress _) _ = Nothing - sameDepIndex GetUTxOWhole GetUTxOWhole + sameDepIndex2 GetUTxOWhole GetUTxOWhole = Just Refl - sameDepIndex GetUTxOWhole _ + sameDepIndex2 GetUTxOWhole _ = Nothing - sameDepIndex DebugEpochState DebugEpochState + sameDepIndex2 DebugEpochState DebugEpochState = Just Refl - sameDepIndex DebugEpochState _ + sameDepIndex2 DebugEpochState _ = Nothing - sameDepIndex (GetCBOR q) (GetCBOR q') - = apply Refl <$> sameDepIndex q q' - sameDepIndex (GetCBOR _) _ + sameDepIndex2 (GetCBOR q) (GetCBOR q') + = (\Refl -> Refl) <$> sameDepIndex2 q q' + sameDepIndex2 (GetCBOR _) _ = Nothing - sameDepIndex (GetFilteredDelegationsAndRewardAccounts creds) + 
sameDepIndex2 (GetFilteredDelegationsAndRewardAccounts creds) (GetFilteredDelegationsAndRewardAccounts creds') | creds == creds' = Just Refl | otherwise = Nothing - sameDepIndex (GetFilteredDelegationsAndRewardAccounts _) _ + sameDepIndex2 (GetFilteredDelegationsAndRewardAccounts _) _ = Nothing - sameDepIndex GetGenesisConfig GetGenesisConfig + sameDepIndex2 GetGenesisConfig GetGenesisConfig = Just Refl - sameDepIndex GetGenesisConfig _ + sameDepIndex2 GetGenesisConfig _ = Nothing - sameDepIndex DebugNewEpochState DebugNewEpochState + sameDepIndex2 DebugNewEpochState DebugNewEpochState = Just Refl - sameDepIndex DebugNewEpochState _ + sameDepIndex2 DebugNewEpochState _ = Nothing - sameDepIndex DebugChainDepState DebugChainDepState + sameDepIndex2 DebugChainDepState DebugChainDepState = Just Refl - sameDepIndex DebugChainDepState _ + sameDepIndex2 DebugChainDepState _ = Nothing - sameDepIndex GetRewardProvenance GetRewardProvenance + sameDepIndex2 GetRewardProvenance GetRewardProvenance = Just Refl - sameDepIndex GetRewardProvenance _ + sameDepIndex2 GetRewardProvenance _ = Nothing - sameDepIndex (GetUTxOByTxIn addrs) (GetUTxOByTxIn addrs') + sameDepIndex2 (GetUTxOByTxIn addrs) (GetUTxOByTxIn addrs') | addrs == addrs' = Just Refl | otherwise = Nothing - sameDepIndex (GetUTxOByTxIn _) _ + sameDepIndex2 (GetUTxOByTxIn _) _ = Nothing - sameDepIndex GetStakePools GetStakePools + sameDepIndex2 GetStakePools GetStakePools = Just Refl - sameDepIndex GetStakePools _ + sameDepIndex2 GetStakePools _ = Nothing - sameDepIndex (GetStakePoolParams poolids) (GetStakePoolParams poolids') + sameDepIndex2 (GetStakePoolParams poolids) (GetStakePoolParams poolids') | poolids == poolids' = Just Refl | otherwise = Nothing - sameDepIndex (GetStakePoolParams _) _ + sameDepIndex2 (GetStakePoolParams _) _ = Nothing - sameDepIndex GetRewardInfoPools GetRewardInfoPools + sameDepIndex2 GetRewardInfoPools GetRewardInfoPools = Just Refl - sameDepIndex GetRewardInfoPools _ + sameDepIndex2 GetRewardInfoPools _ = Nothing - sameDepIndex (GetPoolState poolids) (GetPoolState poolids') + sameDepIndex2 (GetPoolState poolids) (GetPoolState poolids') | poolids == poolids' = Just Refl | otherwise = Nothing - sameDepIndex (GetPoolState _) _ + sameDepIndex2 (GetPoolState _) _ = Nothing - sameDepIndex (GetStakeSnapshots poolid) (GetStakeSnapshots poolid') + sameDepIndex2 (GetStakeSnapshots poolid) (GetStakeSnapshots poolid') | poolid == poolid' = Just Refl | otherwise = Nothing - sameDepIndex (GetStakeSnapshots _) _ + sameDepIndex2 (GetStakeSnapshots _) _ = Nothing - sameDepIndex (GetPoolDistr poolids) (GetPoolDistr poolids') + sameDepIndex2 (GetPoolDistr poolids) (GetPoolDistr poolids') | poolids == poolids' = Just Refl | otherwise = Nothing - sameDepIndex (GetPoolDistr _) _ + sameDepIndex2 (GetPoolDistr _) _ = Nothing - sameDepIndex (GetStakeDelegDeposits stakeCreds) (GetStakeDelegDeposits stakeCreds') + sameDepIndex2 (GetStakeDelegDeposits stakeCreds) (GetStakeDelegDeposits stakeCreds') | stakeCreds == stakeCreds' = Just Refl | otherwise = Nothing - sameDepIndex (GetStakeDelegDeposits _) _ + sameDepIndex2 (GetStakeDelegDeposits _) _ = Nothing - sameDepIndex GetConstitution GetConstitution = Just Refl - sameDepIndex GetConstitution _ = Nothing - sameDepIndex GetGovState GetGovState = Just Refl - sameDepIndex GetGovState _ = Nothing - sameDepIndex GetDRepState{} GetDRepState{} = Just Refl - sameDepIndex GetDRepState{} _ = Nothing - sameDepIndex GetDRepStakeDistr{} GetDRepStakeDistr{} = Just Refl - sameDepIndex GetDRepStakeDistr{} _ = 
Nothing - sameDepIndex GetCommitteeMembersState{} GetCommitteeMembersState{} = Just Refl - sameDepIndex GetCommitteeMembersState{} _ = Nothing - sameDepIndex (GetFilteredVoteDelegatees stakeCreds) (GetFilteredVoteDelegatees stakeCreds') + sameDepIndex2 GetConstitution GetConstitution = Just Refl + sameDepIndex2 GetConstitution _ = Nothing + sameDepIndex2 GetGovState GetGovState = Just Refl + sameDepIndex2 GetGovState _ = Nothing + sameDepIndex2 GetDRepState{} GetDRepState{} = Just Refl + sameDepIndex2 GetDRepState{} _ = Nothing + sameDepIndex2 GetDRepStakeDistr{} GetDRepStakeDistr{} = Just Refl + sameDepIndex2 GetDRepStakeDistr{} _ = Nothing + sameDepIndex2 GetCommitteeMembersState{} GetCommitteeMembersState{} = Just Refl + sameDepIndex2 GetCommitteeMembersState{} _ = Nothing + sameDepIndex2 (GetFilteredVoteDelegatees stakeCreds) (GetFilteredVoteDelegatees stakeCreds') | stakeCreds == stakeCreds' = Just Refl | otherwise = Nothing - sameDepIndex GetFilteredVoteDelegatees {} _ = Nothing - sameDepIndex GetAccountState {} GetAccountState {} = Just Refl - sameDepIndex GetAccountState {} _ = Nothing - sameDepIndex GetSPOStakeDistr{} GetSPOStakeDistr{} = Just Refl - sameDepIndex GetSPOStakeDistr{} _ = Nothing - sameDepIndex GetProposals{} GetProposals{} = Just Refl - sameDepIndex GetProposals{} _ = Nothing - sameDepIndex GetRatifyState{} GetRatifyState{} = Just Refl - sameDepIndex GetRatifyState{} _ = Nothing - sameDepIndex GetFuturePParams{} GetFuturePParams{} = Just Refl - sameDepIndex GetFuturePParams{} _ = Nothing - sameDepIndex GetBigLedgerPeerSnapshot GetBigLedgerPeerSnapshot = Just Refl - sameDepIndex GetBigLedgerPeerSnapshot _ = Nothing - -deriving instance Eq (BlockQuery (ShelleyBlock proto era) result) -deriving instance Show (BlockQuery (ShelleyBlock proto era) result) - -instance ShelleyCompatible proto era => ShowQuery (BlockQuery (ShelleyBlock proto era)) where + sameDepIndex2 GetFilteredVoteDelegatees {} _ = Nothing + sameDepIndex2 GetAccountState {} GetAccountState {} = Just Refl + sameDepIndex2 GetAccountState {} _ = Nothing + sameDepIndex2 GetSPOStakeDistr{} GetSPOStakeDistr{} = Just Refl + sameDepIndex2 GetSPOStakeDistr{} _ = Nothing + sameDepIndex2 GetProposals{} GetProposals{} = Just Refl + sameDepIndex2 GetProposals{} _ = Nothing + sameDepIndex2 GetRatifyState{} GetRatifyState{} = Just Refl + sameDepIndex2 GetRatifyState{} _ = Nothing + sameDepIndex2 GetFuturePParams{} GetFuturePParams{} = Just Refl + sameDepIndex2 GetFuturePParams{} _ = Nothing + sameDepIndex2 GetBigLedgerPeerSnapshot GetBigLedgerPeerSnapshot = Just Refl + sameDepIndex2 GetBigLedgerPeerSnapshot _ = Nothing + +deriving instance Eq (BlockQuery (ShelleyBlock proto era) fp result) +deriving instance Show (BlockQuery (ShelleyBlock proto era) fp result) + +instance ShelleyCompatible proto era => ShowQuery (BlockQuery (ShelleyBlock proto era) fp) where showResult = \case GetLedgerTip -> show GetEpochNo -> show @@ -697,7 +755,7 @@ instance ShelleyCompatible proto era => ShowQuery (BlockQuery (ShelleyBlock prot GetBigLedgerPeerSnapshot -> show -- | Is the given query supported by the given 'ShelleyNodeToClientVersion'? 
-querySupportedVersion :: BlockQuery (ShelleyBlock proto era) result -> ShelleyNodeToClientVersion -> Bool +querySupportedVersion :: BlockQuery (ShelleyBlock proto era) fp result -> ShelleyNodeToClientVersion -> Bool querySupportedVersion = \case GetLedgerTip -> (>= v1) GetEpochNo -> (>= v1) @@ -781,7 +839,7 @@ getFilteredDelegationsAndRewardAccounts ss creds = filteredDelegations = Map.mapMaybe umElemSPool umElemsRestricted filteredRwdAcnts = - Map.mapMaybe (\e -> fromCompact . rdReward <$> umElemRDPair e) umElemsRestricted + Map.mapMaybe (fmap (fromCompact . rdReward) . umElemRDPair) umElemsRestricted getFilteredVoteDelegatees :: SL.NewEpochState era @@ -797,8 +855,8 @@ getFilteredVoteDelegatees ss creds = Map.mapMaybe umElemDRep umElemsRestricted -------------------------------------------------------------------------------} encodeShelleyQuery :: - forall era proto result. ShelleyBasedEra era - => BlockQuery (ShelleyBlock proto era) result -> Encoding + forall era proto fp result. ShelleyBasedEra era + => BlockQuery (ShelleyBlock proto era) fp result -> Encoding encodeShelleyQuery query = case query of GetLedgerTip -> CBOR.encodeListLen 1 <> CBOR.encodeWord8 0 @@ -873,7 +931,7 @@ encodeShelleyQuery query = case query of decodeShelleyQuery :: forall era proto. ShelleyBasedEra era - => forall s. Decoder s (SomeSecond BlockQuery (ShelleyBlock proto era)) + => forall s. Decoder s (SomeBlockQuery (BlockQuery (ShelleyBlock proto era))) decodeShelleyQuery = do len <- CBOR.decodeListLen tag <- CBOR.decodeWord8 @@ -892,52 +950,52 @@ decodeShelleyQuery = do Nothing -> failmsg "that query is not supported before Conway," case (len, tag) of - (1, 0) -> return $ SomeSecond GetLedgerTip - (1, 1) -> return $ SomeSecond GetEpochNo - (2, 2) -> SomeSecond . GetNonMyopicMemberRewards <$> fromCBOR - (1, 3) -> return $ SomeSecond GetCurrentPParams - (1, 4) -> return $ SomeSecond GetProposedPParamsUpdates - (1, 5) -> return $ SomeSecond GetStakeDistribution - (2, 6) -> SomeSecond . GetUTxOByAddress <$> LC.fromEraCBOR @era - (1, 7) -> return $ SomeSecond GetUTxOWhole - (1, 8) -> return $ SomeSecond DebugEpochState - (2, 9) -> (\(SomeSecond q) -> SomeSecond (GetCBOR q)) <$> decodeShelleyQuery - (2, 10) -> SomeSecond . GetFilteredDelegationsAndRewardAccounts <$> LC.fromEraCBOR @era - (1, 11) -> return $ SomeSecond GetGenesisConfig - (1, 12) -> return $ SomeSecond DebugNewEpochState - (1, 13) -> return $ SomeSecond DebugChainDepState - (1, 14) -> return $ SomeSecond GetRewardProvenance - (2, 15) -> SomeSecond . GetUTxOByTxIn <$> LC.fromEraCBOR @era - (1, 16) -> return $ SomeSecond GetStakePools - (2, 17) -> SomeSecond . GetStakePoolParams <$> fromCBOR - (1, 18) -> return $ SomeSecond GetRewardInfoPools - (2, 19) -> SomeSecond . GetPoolState <$> fromCBOR - (2, 20) -> SomeSecond . GetStakeSnapshots <$> fromCBOR - (2, 21) -> SomeSecond . GetPoolDistr <$> fromCBOR - (2, 22) -> SomeSecond . GetStakeDelegDeposits <$> fromCBOR - (1, 23) -> requireCG $ return $ SomeSecond GetConstitution - (1, 24) -> return $ SomeSecond GetGovState - (2, 25) -> requireCG $ SomeSecond . GetDRepState <$> fromCBOR - (2, 26) -> requireCG $ SomeSecond . GetDRepStakeDistr <$> LC.fromEraCBOR @era + (1, 0) -> return $ SomeBlockQuery GetLedgerTip + (1, 1) -> return $ SomeBlockQuery GetEpochNo + (2, 2) -> SomeBlockQuery . 
GetNonMyopicMemberRewards <$> fromCBOR + (1, 3) -> return $ SomeBlockQuery GetCurrentPParams + (1, 4) -> return $ SomeBlockQuery GetProposedPParamsUpdates + (1, 5) -> return $ SomeBlockQuery GetStakeDistribution + (2, 6) -> SomeBlockQuery . GetUTxOByAddress <$> LC.fromEraCBOR @era + (1, 7) -> return $ SomeBlockQuery GetUTxOWhole + (1, 8) -> return $ SomeBlockQuery DebugEpochState + (2, 9) -> (\(SomeBlockQuery q) -> SomeBlockQuery (GetCBOR q)) <$> decodeShelleyQuery + (2, 10) -> SomeBlockQuery . GetFilteredDelegationsAndRewardAccounts <$> LC.fromEraCBOR @era + (1, 11) -> return $ SomeBlockQuery GetGenesisConfig + (1, 12) -> return $ SomeBlockQuery DebugNewEpochState + (1, 13) -> return $ SomeBlockQuery DebugChainDepState + (1, 14) -> return $ SomeBlockQuery GetRewardProvenance + (2, 15) -> SomeBlockQuery . GetUTxOByTxIn <$> LC.fromEraCBOR @era + (1, 16) -> return $ SomeBlockQuery GetStakePools + (2, 17) -> SomeBlockQuery . GetStakePoolParams <$> fromCBOR + (1, 18) -> return $ SomeBlockQuery GetRewardInfoPools + (2, 19) -> SomeBlockQuery . GetPoolState <$> fromCBOR + (2, 20) -> SomeBlockQuery . GetStakeSnapshots <$> fromCBOR + (2, 21) -> SomeBlockQuery . GetPoolDistr <$> fromCBOR + (2, 22) -> SomeBlockQuery . GetStakeDelegDeposits <$> fromCBOR + (1, 23) -> requireCG $ return $ SomeBlockQuery GetConstitution + (1, 24) -> return $ SomeBlockQuery GetGovState + (2, 25) -> requireCG $ SomeBlockQuery . GetDRepState <$> fromCBOR + (2, 26) -> requireCG $ SomeBlockQuery . GetDRepStakeDistr <$> LC.fromEraCBOR @era (4, 27) -> requireCG $ do coldCreds <- fromCBOR hotCreds <- fromCBOR statuses <- LC.fromEraCBOR @era - return $ SomeSecond $ GetCommitteeMembersState coldCreds hotCreds statuses + return $ SomeBlockQuery $ GetCommitteeMembersState coldCreds hotCreds statuses (2, 28) -> requireCG $ do - SomeSecond . GetFilteredVoteDelegatees <$> LC.fromEraCBOR @era - (1, 29) -> return $ SomeSecond GetAccountState - (2, 30) -> requireCG $ SomeSecond . GetSPOStakeDistr <$> LC.fromEraCBOR @era - (2, 31) -> requireCG $ SomeSecond . GetProposals <$> LC.fromEraCBOR @era - (1, 32) -> requireCG $ return $ SomeSecond GetRatifyState - (1, 33) -> requireCG $ return $ SomeSecond GetFuturePParams - (1, 34) -> return $ SomeSecond GetBigLedgerPeerSnapshot + SomeBlockQuery . GetFilteredVoteDelegatees <$> LC.fromEraCBOR @era + (1, 29) -> return $ SomeBlockQuery GetAccountState + (2, 30) -> requireCG $ SomeBlockQuery . GetSPOStakeDistr <$> LC.fromEraCBOR @era + (2, 31) -> requireCG $ SomeBlockQuery . GetProposals <$> LC.fromEraCBOR @era + (1, 32) -> requireCG $ return $ SomeBlockQuery GetRatifyState + (1, 33) -> requireCG $ return $ SomeBlockQuery GetFuturePParams + (1, 34) -> return $ SomeBlockQuery GetBigLedgerPeerSnapshot _ -> failmsg "invalid" encodeShelleyResult :: - forall proto era result. ShelleyCompatible proto era + forall proto era fp result. ShelleyCompatible proto era => ShelleyNodeToClientVersion - -> BlockQuery (ShelleyBlock proto era) result -> result -> Encoding + -> BlockQuery (ShelleyBlock proto era) fp result -> result -> Encoding encodeShelleyResult v query = case query of GetLedgerTip -> encodePoint encode GetEpochNo -> toCBOR @@ -976,9 +1034,9 @@ encodeShelleyResult v query = case query of GetBigLedgerPeerSnapshot -> toCBOR decodeShelleyResult :: - forall proto era result. ShelleyCompatible proto era + forall proto era fp result. ShelleyCompatible proto era => ShelleyNodeToClientVersion - -> BlockQuery (ShelleyBlock proto era) result + -> BlockQuery (ShelleyBlock proto era) fp result -> forall s. 
Decoder s result decodeShelleyResult v query = case query of GetLedgerTip -> decodePoint decode @@ -1107,3 +1165,114 @@ instance <*> fromCBOR <*> fromCBOR <*> fromCBOR + +{------------------------------------------------------------------------------- + Instances to implement BlockSupportsHFLedgerQuery +-------------------------------------------------------------------------------} + +answerShelleyLookupQueries :: + forall xs proto era m result. + ( HasCanonicalTxIn xs + , HasHardForkTxOut xs + , CanHardFork xs + , BlockSupportsHFLedgerQuery xs + , Monad m + , ShelleyCompatible proto era + ) + => Index xs (ShelleyBlock proto era) + -> ExtLedgerCfg (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFLookupTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m result +answerShelleyLookupQueries idx cfg q forker = + case q of + GetUTxOByTxIn txins -> + answerGetUtxOByTxIn txins + GetCBOR q' -> + mkSerialised (encodeShelleyResult maxBound q') + <$> answerBlockQueryHFLookup idx cfg q' forker + where + answerGetUtxOByTxIn :: + Set.Set (SL.TxIn (EraCrypto era)) + -> m (SL.UTxO era) + answerGetUtxOByTxIn txins = do + LedgerTables (ValuesMK values) <- + LedgerDB.roforkerReadTables + forker + (castLedgerTables $ injectLedgerTables idx (LedgerTables $ KeysMK txins)) + pure + $ SL.UTxO + $ Map.mapKeys (distribCanonicalTxIn idx) + $ Map.mapMaybeWithKey + (\k v -> + if distribCanonicalTxIn idx k `Set.member` txins + then Just $ distribHardForkTxOut idx v + else Nothing) + values + +filterGetUTxOByAddressOne :: + (ShelleyBasedEra era, EraCrypto era ~ c) + => Set (Addr c) + -> LC.TxOut era + -> Bool +filterGetUTxOByAddressOne addrs = + let + compactAddrSet = Set.map compactAddr addrs + checkAddr out = + case out ^. SL.addrEitherTxOutL of + Left addr -> addr `Set.member` addrs + Right cAddr -> cAddr `Set.member` compactAddrSet + in + checkAddr + +answerShelleyTraversingQueries :: + forall xs proto era m result. + ( ShelleyCompatible proto era + , BlockSupportsHFLedgerQuery xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + , HardForkHasLedgerTables xs + , CanHardFork xs + ) + => Monad m + => Index xs (ShelleyBlock proto era) + -> ExtLedgerCfg (ShelleyBlock proto era) + -> BlockQuery (ShelleyBlock proto era) QFTraverseTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m result +answerShelleyTraversingQueries idx cfg q forker = case q of + GetUTxOByAddress{} -> loop (queryLedgerGetTraversingFilter idx q) NoPreviousQuery emptyUtxo + GetUTxOWhole -> loop (queryLedgerGetTraversingFilter idx q) NoPreviousQuery emptyUtxo + GetCBOR q' -> + mkSerialised (encodeShelleyResult maxBound q') <$> + answerBlockQueryHFTraverse idx cfg q' forker + where + emptyUtxo = SL.UTxO Map.empty + + combUtxo (SL.UTxO l) vs = SL.UTxO $ Map.union l vs + + partial :: + (Value (LedgerState (HardForkBlock xs)) -> Bool) + -> LedgerTables (ExtLedgerState (HardForkBlock xs)) ValuesMK + -> Map (SL.TxIn (EraCrypto era)) (LC.TxOut era) + partial queryPredicate (LedgerTables (ValuesMK vs)) = + Map.mapKeys (distribCanonicalTxIn idx) + $ Map.mapMaybeWithKey + (\_k v -> + if queryPredicate v + then Just $ distribHardForkTxOut idx v + else Nothing) + vs + + f :: ValuesMK k v -> Bool + f (ValuesMK vs) = Map.null vs + + toKey (LedgerTables (ValuesMK vs)) = fst $ Map.findMax vs + + loop queryPredicate !prev !acc = do + extValues <- LedgerDB.roforkerRangeReadTables forker prev + if ltcollapse $ ltmap (K2 . 
f) extValues + then pure acc + else loop queryPredicate + (PreviousQueryWasUpTo $ toKey extValues) + (combUtxo acc $ partial queryPredicate extValues) diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/SupportsProtocol.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/SupportsProtocol.hs index 3f8895fc22..69a1158597 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/SupportsProtocol.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Ledger/SupportsProtocol.hs @@ -35,6 +35,7 @@ import Ouroboros.Consensus.HardFork.History.Util import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol (..)) +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.Abstract (TranslateProto, translateLedgerView) import Ouroboros.Consensus.Protocol.Praos (Praos) @@ -117,11 +118,12 @@ instance mapForecast (translateLedgerView (Proxy @(TPraos crypto, Praos crypto))) $ ledgerViewForecastAt @(ShelleyBlock (TPraos crypto) era) cfg st' where - st' :: LedgerState (ShelleyBlock (TPraos crypto) era) + st' :: LedgerState (ShelleyBlock (TPraos crypto) era) EmptyMK st' = ShelleyLedgerState { shelleyLedgerTip = coerceTip <$> shelleyLedgerTip st, shelleyLedgerState = shelleyLedgerState st, - shelleyLedgerTransition = shelleyLedgerTransition st + shelleyLedgerTransition = shelleyLedgerTransition st, + shelleyLedgerTables = emptyLedgerTables } coerceTip (ShelleyTip slot block hash) = ShelleyTip slot block (coerce hash) diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/Serialisation.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/Serialisation.hs index 29bbd5ddf2..d83e8d64f2 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/Serialisation.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/Serialisation.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -19,7 +20,9 @@ import qualified Data.ByteString.Lazy as Lazy import Data.Typeable (Typeable) import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (GenTxId) +import Ouroboros.Consensus.Ledger.Tables (EmptyMK) import Ouroboros.Consensus.Node.Run import Ouroboros.Consensus.Node.Serialisation import Ouroboros.Consensus.Protocol.Praos (PraosState) @@ -53,9 +56,9 @@ instance ShelleyCompatible proto era => EncodeDisk (ShelleyBlock proto era) (Hea instance ShelleyCompatible proto era => DecodeDisk (ShelleyBlock proto era) (Lazy.ByteString -> Header (ShelleyBlock proto era)) where decodeDisk _ = decodeShelleyHeader -instance ShelleyCompatible proto era => EncodeDisk (ShelleyBlock proto era) (LedgerState (ShelleyBlock proto era)) where +instance ShelleyCompatible proto era => EncodeDisk (ShelleyBlock proto era) (LedgerState (ShelleyBlock proto era) EmptyMK) where encodeDisk _ = encodeShelleyLedgerState -instance ShelleyCompatible proto era => DecodeDisk (ShelleyBlock proto era) (LedgerState (ShelleyBlock proto era)) where +instance ShelleyCompatible proto era => DecodeDisk (ShelleyBlock proto era) (LedgerState (ShelleyBlock proto era) EmptyMK) where decodeDisk _ = decodeShelleyLedgerState -- | @'ChainDepState' ('BlockProtocol' 
('ShelleyBlock' era))@ @@ -139,7 +142,7 @@ data ShelleyEncoderException era proto = -- | A query was submitted that is not supported by the given -- 'ShelleyNodeToClientVersion'. ShelleyEncoderUnsupportedQuery - (SomeSecond BlockQuery (ShelleyBlock proto era)) + (SomeBlockQuery (BlockQuery (ShelleyBlock proto era))) ShelleyNodeToClientVersion deriving (Show) @@ -177,17 +180,17 @@ instance ShelleyBasedEra era => SerialiseNodeToClient (ShelleyBlock proto era) ( decodeNodeToClient _ _ = fromEraCBOR @era instance ShelleyCompatible proto era - => SerialiseNodeToClient (ShelleyBlock proto era) (SomeSecond BlockQuery (ShelleyBlock proto era)) where - encodeNodeToClient _ version (SomeSecond q) + => SerialiseNodeToClient (ShelleyBlock proto era) (SomeBlockQuery (BlockQuery (ShelleyBlock proto era))) where + encodeNodeToClient _ version (SomeBlockQuery q) | querySupportedVersion q version = encodeShelleyQuery q | otherwise - = throw $ ShelleyEncoderUnsupportedQuery (SomeSecond q) version + = throw $ ShelleyEncoderUnsupportedQuery (SomeBlockQuery q) version decodeNodeToClient _ _ = decodeShelleyQuery -instance ShelleyCompatible proto era => SerialiseResult (ShelleyBlock proto era) (BlockQuery (ShelleyBlock proto era)) where - encodeResult _ = encodeShelleyResult - decodeResult _ = decodeShelleyResult +instance ShelleyCompatible proto era => SerialiseResult' (ShelleyBlock proto era) BlockQuery where + encodeResult' _ = encodeShelleyResult + decodeResult' _ = decodeShelleyResult instance ShelleyCompatible proto era => SerialiseNodeToClient (ShelleyBlock proto era) SlotNo where encodeNodeToClient _ _ = toCBOR diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/TPraos.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/TPraos.hs index 44d13dee75..254dbf564b 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/TPraos.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/Node/TPraos.hs @@ -272,20 +272,22 @@ protocolInfoTPraosShelleyBased ProtocolParamsShelleyBased { , shelleyStorageConfigSecurityParam = tpraosSecurityParam tpraosParams } - initLedgerState :: LedgerState (ShelleyBlock (TPraos c) era) + initLedgerState :: LedgerState (ShelleyBlock (TPraos c) era) ValuesMK initLedgerState = ShelleyLedgerState { shelleyLedgerTip = Origin - , shelleyLedgerState = - L.injectIntoTestState transitionCfg - $ L.createInitialState transitionCfg + , shelleyLedgerState = st `withUtxoSL` emptyMK , shelleyLedgerTransition = ShelleyTransitionInfo {shelleyAfterVoting = 0} + , shelleyLedgerTables = LedgerTables $ projectUtxoSL st } + where + st = L.injectIntoTestState transitionCfg + $ L.createInitialState transitionCfg initChainDepState :: TPraosState c initChainDepState = TPraosState Origin $ SL.initialChainDepState initialNonce (SL.sgGenDelegs genesis) - initExtLedgerState :: ExtLedgerState (ShelleyBlock (TPraos c) era) + initExtLedgerState :: ExtLedgerState (ShelleyBlock (TPraos c) era) ValuesMK initExtLedgerState = ExtLedgerState { ledgerState = initLedgerState , headerState = genesisHeaderState initChainDepState diff --git a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/ShelleyHFC.hs b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/ShelleyHFC.hs index 1527dc8c6c..1fb3fa0063 100644 --- a/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/ShelleyHFC.hs +++ b/ouroboros-consensus-cardano/src/shelley/Ouroboros/Consensus/Shelley/ShelleyHFC.hs @@ -1,8 
+1,12 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -35,6 +39,8 @@ import Control.Monad.Except (runExcept, throwError, withExceptT) import Data.Coerce import qualified Data.Map.Strict as Map import Data.SOP.BasicFunctors +import Data.SOP.Functors (Flip (..)) +import Data.SOP.Index (Index (..)) import Data.SOP.InPairs (RequiringBoth (..), ignoringBoth) import qualified Data.Text as T (pack) import Data.Void (Void) @@ -138,10 +144,10 @@ type ProtocolShelley = HardForkProtocol '[ ShelleyBlock (TPraos StandardCrypto) -------------------------------------------------------------------------------} shelleyTransition :: - forall era proto. ShelleyCompatible proto era + forall era proto mk. ShelleyCompatible proto era => PartialLedgerConfig (ShelleyBlock proto era) -> Word16 -- ^ Next era's initial major protocol version - -> LedgerState (ShelleyBlock proto era) + -> LedgerState (ShelleyBlock proto era) mk -> Maybe EpochNo shelleyTransition ShelleyPartialLedgerConfig{..} transitionMajorVersionRaw @@ -269,7 +275,7 @@ crossEraForecastAcrossShelley = coerce forecastAcrossShelley -- | Forecast from a Shelley-based era to the next Shelley-based era. forecastAcrossShelley :: - forall protoFrom protoTo eraFrom eraTo. + forall protoFrom protoTo eraFrom eraTo mk. ( TranslateProto protoFrom protoTo , LedgerSupportsProtocol (ShelleyBlock protoFrom eraFrom) ) @@ -277,7 +283,7 @@ forecastAcrossShelley :: -> ShelleyLedgerConfig eraTo -> Bound -- ^ Transition between the two eras -> SlotNo -- ^ Forecast for this slot - -> LedgerState (ShelleyBlock protoFrom eraFrom) + -> LedgerState (ShelleyBlock protoFrom eraFrom) mk -> Except OutsideForecastRange (WrapLedgerView (ShelleyBlock protoTo eraTo)) forecastAcrossShelley cfgFrom cfgTo transition forecastFor ledgerStateFrom | forecastFor < maxFor @@ -321,19 +327,34 @@ instance ( ShelleyBasedEra era return $ ShelleyTip sno bno (ShelleyHash hash) instance ( ShelleyBasedEra era + , ShelleyBasedEra (SL.PreviousEra era) , SL.TranslateEra era (ShelleyTip proto) , SL.TranslateEra era SL.NewEpochState , SL.TranslationError era SL.NewEpochState ~ Void - ) => SL.TranslateEra era (LedgerState :.: ShelleyBlock proto) where - translateEra ctxt (Comp (ShelleyLedgerState tip state _transition)) = do + , EraCrypto (SL.PreviousEra era) ~ EraCrypto era + , CanMapMK mk + ) => SL.TranslateEra era (Flip LedgerState mk :.: ShelleyBlock proto) where + translateEra ctxt (Comp (Flip (ShelleyLedgerState tip state _transition tables))) = do tip' <- mapM (SL.translateEra ctxt) tip state' <- SL.translateEra ctxt state - return $ Comp $ ShelleyLedgerState { + return $ Comp $ Flip $ ShelleyLedgerState { shelleyLedgerTip = tip' , shelleyLedgerState = state' , shelleyLedgerTransition = ShelleyTransitionInfo 0 + , shelleyLedgerTables = translateShelleyTables tables } +translateShelleyTables :: + ( EraCrypto (SL.PreviousEra era) ~ EraCrypto era + , CanMapMK mk + , ShelleyBasedEra era + , ShelleyBasedEra (SL.PreviousEra era) + ) + => LedgerTables (LedgerState (ShelleyBlock proto (SL.PreviousEra era))) mk + -> LedgerTables (LedgerState (ShelleyBlock proto era)) mk +translateShelleyTables (LedgerTables utxoTable) = + LedgerTables $ mapMK 
SL.upgradeTxOut utxoTable + instance ( ShelleyBasedEra era , SL.TranslateEra era WrapTx ) => SL.TranslateEra era (GenTx :.: ShelleyBlock proto) where @@ -350,3 +371,67 @@ instance ( ShelleyBasedEra era Comp . WrapValidatedGenTx . mkShelleyValidatedTx . SL.coerceValidated <$> SL.translateValidated @era @WrapTx ctxt (SL.coerceValidated vtx) + +{------------------------------------------------------------------------------- + Canonical TxIn +-------------------------------------------------------------------------------} + +instance ShelleyBasedEra era + => HasCanonicalTxIn '[ShelleyBlock proto era] where + newtype instance CanonicalTxIn '[ShelleyBlock proto era] = ShelleyBlockHFCTxIn { + getShelleyBlockHFCTxIn :: SL.TxIn (EraCrypto era) + } + deriving stock (Show, Eq, Ord) + deriving newtype NoThunks + + injectCanonicalTxIn IZ txIn = ShelleyBlockHFCTxIn txIn + injectCanonicalTxIn (IS idx') _ = case idx' of {} + + distribCanonicalTxIn IZ txIn = getShelleyBlockHFCTxIn txIn + distribCanonicalTxIn (IS idx') _ = case idx' of {} + + encodeCanonicalTxIn (ShelleyBlockHFCTxIn txIn) = SL.toEraCBOR @era txIn + + decodeCanonicalTxIn = ShelleyBlockHFCTxIn <$> SL.fromEraCBOR @era + +{------------------------------------------------------------------------------- + HardForkTxOut +-------------------------------------------------------------------------------} + +instance HasHardForkTxOut '[ShelleyBlock proto era] where + type instance HardForkTxOut '[ShelleyBlock proto era] = SL.TxOut era + injectHardForkTxOut IZ txOut = txOut + injectHardForkTxOut (IS idx') _ = case idx' of {} + distribHardForkTxOut IZ txOut = txOut + distribHardForkTxOut (IS idx') _ = case idx' of {} + +instance ShelleyBasedEra era => SerializeHardForkTxOut '[ShelleyBlock proto era] where + encodeHardForkTxOut _ = SL.toEraCBOR @era + decodeHardForkTxOut _ = SL.fromEraCBOR @era + +{------------------------------------------------------------------------------- + Queries +-------------------------------------------------------------------------------} + +instance ( ShelleyCompatible proto era + , ShelleyBasedEra era + , Key (LedgerState (ShelleyBlock proto era)) ~ SL.TxIn (EraCrypto era) + , Value (LedgerState (ShelleyBlock proto era)) ~ SL.TxOut era + , HasHardForkTxOut '[ShelleyBlock proto era] + ) => BlockSupportsHFLedgerQuery '[ShelleyBlock proto era] where + + answerBlockQueryHFLookup IZ cfg q dlv = + answerShelleyLookupQueries IZ cfg q dlv + answerBlockQueryHFLookup (IS idx) _ _ _ = case idx of {} + + answerBlockQueryHFTraverse IZ cfg q dlv = + answerShelleyTraversingQueries IZ cfg q dlv + answerBlockQueryHFTraverse (IS idx) _ _ _ = case idx of {} + + queryLedgerGetTraversingFilter idx@IZ = \case + GetUTxOByAddress addrs -> + filterGetUTxOByAddressOne addrs + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter (IS idx) = case idx of {} diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Ledger.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Ledger.hs index 52778758b7..81e449725e 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Ledger.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Ledger.hs @@ -202,7 +202,7 @@ forgeDualByronBlock :: => TopLevelConfig DualByronBlock -> BlockNo -- ^ Current block number -> SlotNo -- ^ Current slot number - -> TickedLedgerState DualByronBlock -- ^ Ledger + -> 
TickedLedgerState DualByronBlock mk -- ^ Ledger -> [Validated (GenTx DualByronBlock)] -- ^ Txs to add in the block -> PBftIsLeader PBftByronCrypto -- ^ Leader proof ('IsLeader') -> DualByronBlock diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node.hs index 0b2045dd7c..884d17b8e4 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node.hs @@ -44,7 +44,7 @@ import Ouroboros.Consensus.NodeId import Ouroboros.Consensus.Protocol.PBFT import qualified Ouroboros.Consensus.Protocol.PBFT.State as S import Ouroboros.Consensus.Storage.ChainDB.Init (InitChainDB (..)) -import Ouroboros.Consensus.Util ((.....:), (.:)) +import Ouroboros.Consensus.Util ((.....:)) import qualified Test.Cardano.Chain.Elaboration.Block as Spec.Test import qualified Test.Cardano.Chain.Elaboration.Delegation as Spec.Test import qualified Test.Cardano.Chain.Elaboration.Keys as Spec.Test @@ -141,8 +141,8 @@ protocolInfoDualByron abstractGenesis@ByronSpecGenesis{..} params credss = configGenesisData = Impl.configGenesisData translated protocolParameters = Impl.gdProtocolParameters configGenesisData - initAbstractState :: LedgerState ByronSpecBlock - initConcreteState :: LedgerState ByronBlock + initAbstractState :: LedgerState ByronSpecBlock ValuesMK + initConcreteState :: LedgerState ByronBlock ValuesMK initAbstractState = initByronSpecLedgerState abstractGenesis initConcreteState = initByronLedgerState concreteGenesis (Just initUtxo) diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node/Serialisation.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node/Serialisation.hs index 052da0c3f3..db9657f644 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node/Serialisation.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Ouroboros/Consensus/ByronDual/Node/Serialisation.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE LambdaCase #-} @@ -19,7 +20,9 @@ import Ouroboros.Consensus.ByronDual.Ledger import Ouroboros.Consensus.ByronSpec.Ledger import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Dual +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (GenTxId) +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run import Ouroboros.Consensus.Node.Serialisation @@ -63,9 +66,9 @@ instance DecodeDiskDep (NestedCtxt Header) DualByronBlock where (NestedCtxt (CtxtDual ctxt)) = decodeDiskDep ccfg (NestedCtxt ctxt) -instance EncodeDisk DualByronBlock (LedgerState DualByronBlock) where +instance EncodeDisk DualByronBlock (LedgerState DualByronBlock EmptyMK) where encodeDisk _ = encodeDualLedgerState encodeByronLedgerState -instance DecodeDisk DualByronBlock (LedgerState DualByronBlock) where +instance DecodeDisk DualByronBlock (LedgerState DualByronBlock EmptyMK) where decodeDisk _ = decodeDualLedgerState decodeByronLedgerState -- | @'ChainDepState' ('BlockProtocol' 'DualByronBlock')@ @@ -162,15 +165,15 @@ instance SerialiseNodeToClient DualByronBlock (DualGenTxErr ByronBlock ByronSpec encodeNodeToClient 
_ _ = encodeDualGenTxErr encodeByronApplyTxError decodeNodeToClient _ _ = decodeDualGenTxErr decodeByronApplyTxError -instance SerialiseNodeToClient DualByronBlock (SomeSecond BlockQuery DualByronBlock) where - encodeNodeToClient _ _ = \case {} +instance SerialiseNodeToClient DualByronBlock (SomeBlockQuery (BlockQuery DualByronBlock)) where + encodeNodeToClient _ _ (SomeBlockQuery q) = case q of {} decodeNodeToClient _ _ = error "DualByron: no query to decode" instance SerialiseNodeToClient DualByronBlock SlotNo -instance SerialiseResult DualByronBlock (BlockQuery DualByronBlock) where - encodeResult _ _ = \case {} - decodeResult _ _ = \case {} +instance SerialiseResult' DualByronBlock BlockQuery where + encodeResult' _ _ = \case {} + decodeResult' _ _ = \case {} {------------------------------------------------------------------------------- Auxiliary diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Examples.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Examples.hs index 9626d98aa9..e70815595d 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Examples.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Examples.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DisambiguateRecordFields #-} {-# LANGUAGE OverloadedStrings #-} @@ -38,6 +39,8 @@ import Ouroboros.Consensus.Config import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Query +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.NodeId import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Protocol.PBFT @@ -104,16 +107,17 @@ examples = Examples { , exampleQuery = unlabelled exampleQuery , exampleResult = unlabelled exampleResult , exampleAnnTip = unlabelled exampleAnnTip - , exampleLedgerState = unlabelled exampleLedgerState + , exampleLedgerState = unlabelled $ forgetLedgerTables exampleLedgerState , exampleChainDepState = unlabelled exampleChainDepState - , exampleExtLedgerState = unlabelled exampleExtLedgerState + , exampleExtLedgerState = unlabelled $ forgetLedgerTables exampleExtLedgerState , exampleSlotNo = unlabelled exampleSlotNo + , exampleLedgerTables = unlabelled emptyLedgerTables } where regularAndEBB :: a -> a -> Labelled a regularAndEBB regular ebb = labelled [("regular", regular), ("EBB", ebb)] - exampleQuery = SomeSecond GetUpdateInterfaceState + exampleQuery = SomeBlockQuery GetUpdateInterfaceState exampleResult = SomeResult GetUpdateInterfaceState exampleUPIState exampleBlock :: ByronBlock @@ -122,7 +126,7 @@ exampleBlock = cfg (BlockNo 1) (SlotNo 1) - (applyChainTick ledgerConfig (SlotNo 1) ledgerStateAfterEBB) + (applyChainTick ledgerConfig (SlotNo 1) (forgetLedgerTables ledgerStateAfterEBB)) [ValidatedByronTx exampleGenTx] (fakeMkIsLeader leaderCredentials) where @@ -167,7 +171,7 @@ exampleChainDepState = S.fromList signers where signers = map (`S.PBftSigner` CC.exampleKeyHash) [1..4] -emptyLedgerState :: LedgerState ByronBlock +emptyLedgerState :: LedgerState ByronBlock ValuesMK emptyLedgerState = ByronLedgerState { byronLedgerTipBlockNo = Origin , byronLedgerState = initState @@ -178,22 +182,28 @@ emptyLedgerState = ByronLedgerState { Right initState = runExcept $ CC.Block.initialChainValidationState ledgerConfig -ledgerStateAfterEBB :: LedgerState ByronBlock +ledgerStateAfterEBB :: LedgerState ByronBlock 
ValuesMK ledgerStateAfterEBB = - reapplyLedgerBlock ledgerConfig exampleEBB + applyDiffs emptyLedgerState + . reapplyLedgerBlock ledgerConfig exampleEBB + . applyDiffs emptyLedgerState . applyChainTick ledgerConfig (SlotNo 0) + . forgetLedgerTables $ emptyLedgerState -exampleLedgerState :: LedgerState ByronBlock +exampleLedgerState :: LedgerState ByronBlock ValuesMK exampleLedgerState = - reapplyLedgerBlock ledgerConfig exampleBlock + applyDiffs emptyLedgerState + . reapplyLedgerBlock ledgerConfig exampleBlock + . applyDiffs ledgerStateAfterEBB . applyChainTick ledgerConfig (SlotNo 1) + . forgetLedgerTables $ ledgerStateAfterEBB exampleHeaderState :: HeaderState ByronBlock exampleHeaderState = HeaderState (NotOrigin exampleAnnTip) exampleChainDepState -exampleExtLedgerState :: ExtLedgerState ByronBlock +exampleExtLedgerState :: ExtLedgerState ByronBlock ValuesMK exampleExtLedgerState = ExtLedgerState { ledgerState = exampleLedgerState , headerState = exampleHeaderState diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Generators.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Generators.hs index ee39d62924..240a83db55 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Generators.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/Consensus/Byron/Generators.hs @@ -1,17 +1,21 @@ {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE NamedFieldPuns #-} {-# OPTIONS_GHC -Wno-orphans #-} module Test.Consensus.Byron.Generators ( RegularBlock (..) , epochSlots + , genByronLedgerConfig + , genByronLedgerState , k , protocolMagicId ) where import Cardano.Chain.Block (ABlockOrBoundary (..), - ABlockOrBoundaryHdr (..)) + ABlockOrBoundaryHdr (..), ChainValidationState (..), + cvsPreviousHash) import qualified Cardano.Chain.Block as CC.Block import qualified Cardano.Chain.Byron.API as API import Cardano.Chain.Common (KeyHash) @@ -19,6 +23,7 @@ import qualified Cardano.Chain.Delegation as CC.Del import qualified Cardano.Chain.Delegation.Validation.Activation as CC.Act import qualified Cardano.Chain.Delegation.Validation.Interface as CC.DI import qualified Cardano.Chain.Delegation.Validation.Scheduling as CC.Sched +import qualified Cardano.Chain.Genesis as Byron import qualified Cardano.Chain.Genesis as CC.Genesis import Cardano.Chain.Slotting (EpochNumber, EpochSlots (..), SlotNumber) @@ -29,6 +34,7 @@ import qualified Cardano.Chain.UTxO as CC.UTxO import Cardano.Crypto (ProtocolMagicId (..)) import Cardano.Crypto.Hashing (Hash) import Cardano.Ledger.Binary (decCBOR, encCBOR) +import Cardano.Slotting.Slot (WithOrigin (..)) import Control.Monad (replicateM) import Data.Coerce (coerce) import qualified Data.Map.Strict as Map @@ -37,13 +43,17 @@ import Ouroboros.Consensus.Byron.Ledger import Ouroboros.Consensus.Byron.Protocol import Ouroboros.Consensus.Config.SecurityParam import Ouroboros.Consensus.HeaderValidation (AnnTip (..)) +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (GenTxId) +import Ouroboros.Consensus.Ledger.Tables.Utils (emptyLedgerTables) import Ouroboros.Consensus.Protocol.PBFT.State (PBftState) import qualified Ouroboros.Consensus.Protocol.PBFT.State as PBftState import Ouroboros.Network.SizeInBytes import qualified Test.Cardano.Chain.Block.Gen as CC import qualified Test.Cardano.Chain.Common.Gen as CC import qualified Test.Cardano.Chain.Delegation.Gen as CC 
+import qualified Test.Cardano.Chain.Genesis.Gen as CC import qualified Test.Cardano.Chain.MempoolPayload.Gen as CC import qualified Test.Cardano.Chain.Slotting.Gen as CC import qualified Test.Cardano.Chain.Update.Gen as UG @@ -157,8 +167,8 @@ instance Arbitrary API.ApplyMempoolPayloadErr where -- , MempoolUpdateVoteErr <$> arbitrary ] -instance Arbitrary (SomeSecond BlockQuery ByronBlock) where - arbitrary = pure $ SomeSecond GetUpdateInterfaceState +instance Arbitrary (SomeBlockQuery (BlockQuery ByronBlock)) where + arbitrary = pure $ SomeBlockQuery GetUpdateInterfaceState instance Arbitrary EpochNumber where arbitrary = hedgehog CC.genEpochNumber @@ -218,7 +228,15 @@ instance Arbitrary CC.Genesis.GenesisHash where arbitrary = CC.Genesis.GenesisHash <$> arbitrary instance Arbitrary CC.UTxO.UTxO where - arbitrary = hedgehog CC.genUTxO + arbitrary = oneof [ + hedgehog CC.genUTxO + -- We would sometimes like to run tests using a smaller (or even empty) + -- UTxO, but 'genUTxO' generates a UTxO without depending on the QC size + -- parameter. The probability of generating smaller (or empty) UTxOs is + -- therefore low. + , CC.UTxO.fromList <$> + listOf ((,) <$> hedgehog CC.genTxIn <*> hedgehog CC.genTxOut) + ] instance Arbitrary CC.Act.State where arbitrary = CC.Act.State @@ -261,9 +279,34 @@ instance Arbitrary CC.Del.Map where instance Arbitrary ByronTransition where arbitrary = ByronTransitionInfo . Map.fromList <$> arbitrary -instance Arbitrary (LedgerState ByronBlock) where +instance Arbitrary (LedgerState ByronBlock mk) where arbitrary = ByronLedgerState <$> arbitrary <*> arbitrary <*> arbitrary +-- | Generator for a Byron ledger state in which the tip of the ledger given by +-- `byronLedgerTipBlockNo` is consistent with the chain validation state, i.e., if there is no +-- previous block, the ledger tip will be `Origin`.
+genByronLedgerState :: Gen (LedgerState ByronBlock EmptyMK) +genByronLedgerState = do + chainValidationState <- arbitrary + ledgerTransition <- arbitrary + ledgerTipBlockNo <- genLedgerTipBlockNo chainValidationState + pure $ ByronLedgerState { + byronLedgerTipBlockNo = ledgerTipBlockNo + , byronLedgerState = chainValidationState + , byronLedgerTransition = ledgerTransition + } + where + genLedgerTipBlockNo ChainValidationState { cvsPreviousHash } = + case cvsPreviousHash of + Left _ -> pure Origin + Right _ -> At <$> arbitrary + +instance ZeroableMK mk => Arbitrary (LedgerTables (LedgerState ByronBlock) mk) where + arbitrary = pure emptyLedgerTables + +genByronLedgerConfig :: Gen Byron.Config +genByronLedgerConfig = hedgehog $ CC.genConfig protocolMagicId + instance Arbitrary (TipInfoIsEBB ByronBlock) where arbitrary = TipInfoIsEBB <$> arbitrary <*> elements [IsEBB, IsNotEBB] diff --git a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/ThreadNet/Infra/Byron/TrackUpdates.hs b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/ThreadNet/Infra/Byron/TrackUpdates.hs index 98848eecb9..34e0a1a25c 100644 --- a/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/ThreadNet/Infra/Byron/TrackUpdates.hs +++ b/ouroboros-consensus-cardano/src/unstable-byron-testlib/Test/ThreadNet/Infra/Byron/TrackUpdates.hs @@ -1,4 +1,5 @@ {-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DataKinds #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiWayIf #-} {-# LANGUAGE NamedFieldPuns #-} @@ -41,6 +42,7 @@ import qualified Ouroboros.Consensus.Byron.Crypto.DSIGN as Crypto import Ouroboros.Consensus.Byron.Ledger (ByronBlock) import qualified Ouroboros.Consensus.Byron.Ledger as Byron import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Tables (EmptyMK) import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..), ProtocolInfo (..)) import Ouroboros.Consensus.NodeId (CoreNodeId (..)) @@ -88,7 +90,7 @@ mkUpdateLabels :: -> NodeJoinPlan -> NodeTopology -> Ref.Result - -> Byron.LedgerState ByronBlock + -> Byron.LedgerState ByronBlock EmptyMK -- ^ from 'nodeOutputFinalLedger' -> (ProtocolVersionUpdateLabel, SoftwareVersionUpdateLabel) mkUpdateLabels params numSlots genesisConfig nodeJoinPlan topology result diff --git a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Forge.hs b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Forge.hs index 91c5e74c41..4e72c4af7b 100644 --- a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Forge.hs +++ b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Forge.hs @@ -18,7 +18,7 @@ import Ouroboros.Consensus.ByronSpec.Ledger.Orphans () forgeByronSpecBlock :: BlockNo -> SlotNo - -> Ticked (LedgerState ByronSpecBlock) + -> Ticked1 (LedgerState ByronSpecBlock) mk -> [Validated (GenTx ByronSpecBlock)] -> Spec.VKey -> ByronSpecBlock diff --git a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Ledger.hs b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Ledger.hs index 685056d3f3..e31331ec7d 100644 --- a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Ledger.hs +++ b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Ledger.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE 
DerivingVia #-} @@ -12,7 +13,8 @@ module Ouroboros.Consensus.ByronSpec.Ledger.Ledger ( , initByronSpecLedgerState -- * Type family instances , LedgerState (..) - , Ticked (..) + , LedgerTables (..) + , Ticked1 (..) ) where import qualified Byron.Spec.Chain.STS.Rule.Chain as Spec @@ -21,6 +23,7 @@ import Codec.Serialise import Control.Monad.Except import qualified Control.State.Transition as Spec import Data.List.NonEmpty (NonEmpty) +import Data.Void (Void) import GHC.Generics (Generic) import NoThunks.Class (AllowThunk (..), NoThunks) import Ouroboros.Consensus.Block @@ -30,16 +33,21 @@ import Ouroboros.Consensus.ByronSpec.Ledger.Conversions import Ouroboros.Consensus.ByronSpec.Ledger.Genesis (ByronSpecGenesis) import Ouroboros.Consensus.ByronSpec.Ledger.Orphans () import qualified Ouroboros.Consensus.ByronSpec.Ledger.Rules as Rules -import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Abstract (ApplyBlock (..), + CanSerializeLedgerTables, CanStowLedgerTables, GetTip (..), + HasLedgerTables, IsLedger (..), Key, LedgerCfg, + LedgerState, LedgerTables (..), + LedgerTablesAreTrivial (..), UpdateLedger, Value, + VoidLedgerEvent, pureLedgerResult, (..:)) import Ouroboros.Consensus.Ledger.CommonProtocolParams +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Ticked -import Ouroboros.Consensus.Util ((..:)) {------------------------------------------------------------------------------- State -------------------------------------------------------------------------------} -data instance LedgerState ByronSpecBlock = ByronSpecLedgerState { +data instance LedgerState ByronSpecBlock mk = ByronSpecLedgerState { -- | Tip of the ledger (most recently applied block, if any) -- -- The spec state stores the last applied /hash/, but not the /slot/. 
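-- Sketch of the map-kind parameter @mk@ that 'LedgerState' gains in this
-- patch (see the @data instance@ above): it records what the ledger-table
-- columns of a state currently carry. The declarations below are simplified
-- stand-ins for the kinds used throughout the patch, not the library's
-- actual definitions.
import           Data.Map.Strict (Map)
import           Data.Set (Set)

data EmptyMK  k v = EmptyMK                -- no table contents attached (in-memory states)
data KeysMK   k v = KeysMK   (Set k)       -- only keys, e.g. the TxIns a query wants to read
data ValuesMK k v = ValuesMK (Map k v)     -- full entries, e.g. a UTxO subset read from disk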
@@ -50,7 +58,7 @@ data instance LedgerState ByronSpecBlock = ByronSpecLedgerState { } deriving stock (Show, Eq, Generic) deriving anyclass (Serialise) - deriving NoThunks via AllowThunk (LedgerState ByronSpecBlock) + deriving NoThunks via AllowThunk (LedgerState ByronSpecBlock mk) newtype ByronSpecLedgerError = ByronSpecLedgerError { unByronSpecLedgerError :: NonEmpty (Spec.PredicateFailure Spec.CHAIN) @@ -62,7 +70,7 @@ type instance LedgerCfg (LedgerState ByronSpecBlock) = ByronSpecGenesis instance UpdateLedger ByronSpecBlock -initByronSpecLedgerState :: ByronSpecGenesis -> LedgerState ByronSpecBlock +initByronSpecLedgerState :: ByronSpecGenesis -> LedgerState ByronSpecBlock mk initByronSpecLedgerState cfg = ByronSpecLedgerState { byronSpecLedgerTip = Nothing , byronSpecLedgerState = Rules.initStateCHAIN cfg @@ -76,7 +84,7 @@ instance GetTip (LedgerState ByronSpecBlock) where getTip (ByronSpecLedgerState tip state) = castPoint $ getByronSpecTip tip state -instance GetTip (Ticked (LedgerState ByronSpecBlock)) where +instance GetTip (Ticked1 (LedgerState ByronSpecBlock)) where getTip (TickedByronSpecLedgerState tip state) = castPoint $ getByronSpecTip tip state @@ -90,12 +98,12 @@ getByronSpecTip (Just slot) state = BlockPoint Ticking -------------------------------------------------------------------------------} -data instance Ticked (LedgerState ByronSpecBlock) = TickedByronSpecLedgerState { +data instance Ticked1 (LedgerState ByronSpecBlock) mk = TickedByronSpecLedgerState { untickedByronSpecLedgerTip :: Maybe SlotNo , tickedByronSpecLedgerState :: Spec.State Spec.CHAIN } deriving stock (Show, Eq) - deriving NoThunks via AllowThunk (Ticked (LedgerState ByronSpecBlock)) + deriving NoThunks via AllowThunk (Ticked1 (LedgerState ByronSpecBlock) mk) instance IsLedger (LedgerState ByronSpecBlock) where type LedgerErr (LedgerState ByronSpecBlock) = ByronSpecLedgerError @@ -113,6 +121,23 @@ instance IsLedger (LedgerState ByronSpecBlock) where state } +{------------------------------------------------------------------------------- + Ledger Tables +-------------------------------------------------------------------------------} + +type instance Key (LedgerState ByronSpecBlock) = Void +type instance Value (LedgerState ByronSpecBlock) = Void +instance HasLedgerTables (LedgerState ByronSpecBlock) +instance HasLedgerTables (Ticked1 (LedgerState ByronSpecBlock)) +instance CanSerializeLedgerTables (LedgerState ByronSpecBlock) +instance LedgerTablesAreTrivial (LedgerState ByronSpecBlock) where + convertMapKind (ByronSpecLedgerState x y) = + ByronSpecLedgerState x y +instance LedgerTablesAreTrivial (Ticked1 (LedgerState ByronSpecBlock)) where + convertMapKind (TickedByronSpecLedgerState x y) = + TickedByronSpecLedgerState x y +instance CanStowLedgerTables (LedgerState ByronSpecBlock) + {------------------------------------------------------------------------------- Applying blocks -------------------------------------------------------------------------------} @@ -140,6 +165,8 @@ instance ApplyBlock (LedgerState ByronSpecBlock) ByronSpecBlock where Left _ -> error "reapplyBlockLedgerResult: unexpected error" Right b -> b + getBlockKeySets _ = emptyLedgerTables + {------------------------------------------------------------------------------- CommonProtocolParams -------------------------------------------------------------------------------} @@ -148,7 +175,7 @@ instance CommonProtocolParams ByronSpecBlock where maxHeaderSize = fromIntegral . Spec._maxHdrSz . getPParams maxTxSize = fromIntegral . 
Spec._maxTxSz . getPParams -getPParams :: LedgerState ByronSpecBlock -> Spec.PParams +getPParams :: LedgerState ByronSpecBlock mk -> Spec.PParams getPParams = Spec.protocolParameters . getChainStateUPIState diff --git a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Mempool.hs b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Mempool.hs index 24f9e2453b..ec31ee27d1 100644 --- a/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Mempool.hs +++ b/ouroboros-consensus-cardano/src/unstable-byronspec/Ouroboros/Consensus/ByronSpec/Ledger/Mempool.hs @@ -1,6 +1,7 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE TypeFamilies #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -21,6 +22,7 @@ import qualified Ouroboros.Consensus.ByronSpec.Ledger.GenTx as GenTx import Ouroboros.Consensus.ByronSpec.Ledger.Ledger import Ouroboros.Consensus.ByronSpec.Ledger.Orphans () import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils newtype instance GenTx ByronSpecBlock = ByronSpecGenTx { unByronSpecGenTx :: ByronSpecGenTx @@ -37,6 +39,11 @@ newtype instance Validated (GenTx ByronSpecBlock) = ValidatedByronSpecGenTx { type instance ApplyTxErr ByronSpecBlock = ByronSpecGenTxErr +-- | This data family instance is not used anywhere but still required by the +-- instance of @LedgerSupportsMempool ByronSpecBlock@ +newtype instance TxId (GenTx ByronSpecBlock) = TxId Int + deriving newtype NoThunks + instance LedgerSupportsMempool ByronSpecBlock where applyTx cfg _wti _slot tx (TickedByronSpecLedgerState tip st) = fmap (\st' -> @@ -48,11 +55,13 @@ instance LedgerSupportsMempool ByronSpecBlock where -- Byron spec doesn't have multiple validation modes reapplyTx cfg slot vtx st = - fmap fst - $ applyTx cfg DoNotIntervene slot (forgetValidatedByronSpecGenTx vtx) st + applyDiffs st . 
fst + <$> applyTx cfg DoNotIntervene slot (forgetValidatedByronSpecGenTx vtx) st txForgetValidated = forgetValidatedByronSpecGenTx + getTransactionKeySets _ = emptyLedgerTables + instance TxLimits ByronSpecBlock where type TxMeasure ByronSpecBlock = IgnoringOverflow ByteSize32 diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Examples.hs b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Examples.hs index 95038ff6cf..6eb1d0e056 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Examples.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Examples.hs @@ -1,5 +1,8 @@ {-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} @@ -30,6 +33,7 @@ module Test.Consensus.Cardano.Examples ( import Data.Coerce (Coercible) import Data.SOP.BasicFunctors import Data.SOP.Counting (Exactly (..)) +import Data.SOP.Functors (Flip (..)) import Data.SOP.Index (Index (..)) import Data.SOP.Strict import Ouroboros.Consensus.Block @@ -37,13 +41,17 @@ import Ouroboros.Consensus.Byron.Ledger (ByronBlock) import qualified Ouroboros.Consensus.Byron.Ledger as Byron import Ouroboros.Consensus.Cardano.Block import Ouroboros.Consensus.Cardano.CanHardFork () +import Ouroboros.Consensus.Cardano.Ledger () import Ouroboros.Consensus.HardFork.Combinator import Ouroboros.Consensus.HardFork.Combinator.Embed.Nary import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation (AnnTip) -import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState (..)) +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (ApplyTxErr) +import Ouroboros.Consensus.Ledger.Tables (EmptyMK, ValuesMK, + castLedgerTables) import Ouroboros.Consensus.Protocol.TPraos (TPraos) import Ouroboros.Consensus.Shelley.Ledger (ShelleyBlock) import qualified Ouroboros.Consensus.Shelley.Ledger as Shelley @@ -113,21 +121,22 @@ instance Inject SomeResult where instance Inject Examples where inject startBounds (idx :: Index xs x) Examples {..} = Examples { - exampleBlock = inj (Proxy @I) exampleBlock - , exampleSerialisedBlock = inj (Proxy @Serialised) exampleSerialisedBlock - , exampleHeader = inj (Proxy @Header) exampleHeader - , exampleSerialisedHeader = inj (Proxy @SerialisedHeader) exampleSerialisedHeader - , exampleHeaderHash = inj (Proxy @WrapHeaderHash) exampleHeaderHash - , exampleGenTx = inj (Proxy @GenTx) exampleGenTx - , exampleGenTxId = inj (Proxy @WrapGenTxId) exampleGenTxId - , exampleApplyTxErr = inj (Proxy @WrapApplyTxErr) exampleApplyTxErr - , exampleQuery = inj (Proxy @(SomeSecond BlockQuery)) exampleQuery - , exampleResult = inj (Proxy @SomeResult) exampleResult - , exampleAnnTip = inj (Proxy @AnnTip) exampleAnnTip - , exampleLedgerState = inj (Proxy @LedgerState) exampleLedgerState - , exampleChainDepState = inj (Proxy @WrapChainDepState) exampleChainDepState - , exampleExtLedgerState = inj (Proxy @ExtLedgerState) exampleExtLedgerState - , exampleSlotNo = exampleSlotNo + exampleBlock = inj (Proxy @I) exampleBlock + , exampleSerialisedBlock = inj (Proxy @Serialised) exampleSerialisedBlock + , exampleHeader = inj (Proxy 
@Header) exampleHeader + , exampleSerialisedHeader = inj (Proxy @SerialisedHeader) exampleSerialisedHeader + , exampleHeaderHash = inj (Proxy @WrapHeaderHash) exampleHeaderHash + , exampleGenTx = inj (Proxy @GenTx) exampleGenTx + , exampleGenTxId = inj (Proxy @WrapGenTxId) exampleGenTxId + , exampleApplyTxErr = inj (Proxy @WrapApplyTxErr) exampleApplyTxErr + , exampleQuery = inj (Proxy @(SomeBlockQuery :.: BlockQuery)) exampleQuery + , exampleResult = inj (Proxy @SomeResult) exampleResult + , exampleAnnTip = inj (Proxy @AnnTip) exampleAnnTip + , exampleLedgerState = inj (Proxy @(Flip LedgerState EmptyMK)) exampleLedgerState + , exampleChainDepState = inj (Proxy @WrapChainDepState) exampleChainDepState + , exampleExtLedgerState = inj (Proxy @(Flip ExtLedgerState EmptyMK)) exampleExtLedgerState + , exampleSlotNo = exampleSlotNo + , exampleLedgerTables = inj (Proxy @WrapLedgerTables) exampleLedgerTables } where inj :: @@ -139,6 +148,14 @@ instance Inject Examples where => Proxy f -> Labelled a -> Labelled b inj p = fmap (fmap (inject' p startBounds idx)) +-- | This wrapper is used only in the 'Example' instance of 'Inject' so that we +-- can use a type that matches the kind expected by 'inj'. +newtype WrapLedgerTables blk = WrapLedgerTables ( LedgerTables (ExtLedgerState blk) ValuesMK ) + +instance Inject WrapLedgerTables where + inject _startBounds idx (WrapLedgerTables lt) = + WrapLedgerTables $ castLedgerTables $ injectLedgerTables idx (castLedgerTables lt) + {------------------------------------------------------------------------------- Setup -------------------------------------------------------------------------------} @@ -264,14 +281,14 @@ codecConfig = Shelley.ShelleyCodecConfig ledgerStateByron :: - LedgerState ByronBlock - -> LedgerState (CardanoBlock Crypto) + LedgerState ByronBlock mk + -> LedgerState (CardanoBlock Crypto) mk ledgerStateByron stByron = HardForkLedgerState $ HardForkState $ TZ cur where cur = State.Current { currentStart = History.initBound - , currentState = stByron + , currentState = Flip stByron } {------------------------------------------------------------------------------- @@ -322,25 +339,25 @@ exampleApplyTxErrWrongEraShelley :: ApplyTxErr (CardanoBlock Crypto) exampleApplyTxErrWrongEraShelley = HardForkApplyTxErrWrongEra exampleEraMismatchShelley -exampleQueryEraMismatchByron :: SomeSecond BlockQuery (CardanoBlock Crypto) +exampleQueryEraMismatchByron :: SomeBlockQuery (BlockQuery (CardanoBlock Crypto)) exampleQueryEraMismatchByron = - SomeSecond (QueryIfCurrentShelley Shelley.GetLedgerTip) + SomeBlockQuery (QueryIfCurrentShelley Shelley.GetLedgerTip) -exampleQueryEraMismatchShelley :: SomeSecond BlockQuery (CardanoBlock Crypto) +exampleQueryEraMismatchShelley :: SomeBlockQuery (BlockQuery (CardanoBlock Crypto)) exampleQueryEraMismatchShelley = - SomeSecond (QueryIfCurrentByron Byron.GetUpdateInterfaceState) + SomeBlockQuery (QueryIfCurrentByron Byron.GetUpdateInterfaceState) -exampleQueryAnytimeByron :: SomeSecond BlockQuery (CardanoBlock Crypto) +exampleQueryAnytimeByron :: SomeBlockQuery (BlockQuery (CardanoBlock Crypto)) exampleQueryAnytimeByron = - SomeSecond (QueryAnytimeByron GetEraStart) + SomeBlockQuery (QueryAnytimeByron GetEraStart) -exampleQueryAnytimeShelley :: SomeSecond BlockQuery (CardanoBlock Crypto) +exampleQueryAnytimeShelley :: SomeBlockQuery (BlockQuery (CardanoBlock Crypto)) exampleQueryAnytimeShelley = - SomeSecond (QueryAnytimeShelley GetEraStart) + SomeBlockQuery (QueryAnytimeShelley GetEraStart) -exampleQueryHardFork :: 
SomeSecond BlockQuery (CardanoBlock Crypto) +exampleQueryHardFork :: SomeBlockQuery (BlockQuery (CardanoBlock Crypto)) exampleQueryHardFork = - SomeSecond (QueryHardFork GetInterpreter) + SomeBlockQuery (QueryHardFork GetInterpreter) exampleResultEraMismatchByron :: SomeResult (CardanoBlock Crypto) exampleResultEraMismatchByron = diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Generators.hs b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Generators.hs index ee869afc94..a5f1730eed 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Generators.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/Consensus/Cardano/Generators.hs @@ -39,9 +39,11 @@ import Ouroboros.Consensus.HardFork.Combinator import Ouroboros.Consensus.HardFork.Combinator.Serialisation import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Serialisation (Some (..)) import Ouroboros.Consensus.Protocol.TPraos (TPraos) +import Ouroboros.Consensus.Shelley.HFEras () import Ouroboros.Consensus.Shelley.Ledger import Ouroboros.Consensus.Shelley.Ledger.Block () import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () @@ -495,7 +497,7 @@ instance CardanoHardForkConstraints c instance c ~ MockCryptoCompatByron => Arbitrary (WithVersion (HardForkNodeToClientVersion (CardanoEras c)) - (SomeSecond BlockQuery (CardanoBlock c))) where + (SomeBlockQuery (BlockQuery (CardanoBlock c)))) where arbitrary = frequency [ (1, arbitraryNodeToClient injByron injShelley injAllegra injMary injAlonzo injBabbage injConway) , (1, WithVersion @@ -522,21 +524,21 @@ instance c ~ MockCryptoCompatByron , (1, fmap injHardFork <$> arbitrary) ] where - injByron (SomeSecond query) = SomeSecond (QueryIfCurrentByron query) - injShelley (SomeSecond query) = SomeSecond (QueryIfCurrentShelley query) - injAllegra (SomeSecond query) = SomeSecond (QueryIfCurrentAllegra query) - injMary (SomeSecond query) = SomeSecond (QueryIfCurrentMary query) - injAlonzo (SomeSecond query) = SomeSecond (QueryIfCurrentAlonzo query) - injBabbage (SomeSecond query) = SomeSecond (QueryIfCurrentBabbage query) - injConway (SomeSecond query) = SomeSecond (QueryIfCurrentConway query) - injAnytimeByron (Some query) = SomeSecond (QueryAnytimeByron query) - injAnytimeShelley (Some query) = SomeSecond (QueryAnytimeShelley query) - injAnytimeAllegra (Some query) = SomeSecond (QueryAnytimeAllegra query) - injAnytimeMary (Some query) = SomeSecond (QueryAnytimeMary query) - injAnytimeAlonzo (Some query) = SomeSecond (QueryAnytimeAlonzo query) - injAnytimeBabbage (Some query) = SomeSecond (QueryAnytimeBabbage query) - injAnytimeConway (Some query) = SomeSecond (QueryAnytimeConway query) - injHardFork (Some query) = SomeSecond (QueryHardFork query) + injByron (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentByron query) + injShelley (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentShelley query) + injAllegra (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentAllegra query) + injMary (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentMary query) + injAlonzo (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentAlonzo query) + injBabbage (SomeBlockQuery query) = SomeBlockQuery (QueryIfCurrentBabbage query) + injConway (SomeBlockQuery query) = SomeBlockQuery 
(QueryIfCurrentConway query) + injAnytimeByron (Some query) = SomeBlockQuery (QueryAnytimeByron query) + injAnytimeShelley (Some query) = SomeBlockQuery (QueryAnytimeShelley query) + injAnytimeAllegra (Some query) = SomeBlockQuery (QueryAnytimeAllegra query) + injAnytimeMary (Some query) = SomeBlockQuery (QueryAnytimeMary query) + injAnytimeAlonzo (Some query) = SomeBlockQuery (QueryAnytimeAlonzo query) + injAnytimeBabbage (Some query) = SomeBlockQuery (QueryAnytimeBabbage query) + injAnytimeConway (Some query) = SomeBlockQuery (QueryAnytimeConway query) + injHardFork (Some query) = SomeBlockQuery (QueryHardFork query) instance Arbitrary History.EraEnd where arbitrary = oneof diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/Infra/ShelleyBasedHardFork.hs b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/Infra/ShelleyBasedHardFork.hs index d4961835e0..1f4b34496d 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/Infra/ShelleyBasedHardFork.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/Infra/ShelleyBasedHardFork.hs @@ -1,12 +1,16 @@ {-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} @@ -25,19 +29,25 @@ module Test.ThreadNet.Infra.ShelleyBasedHardFork ( -- * Node , ShelleyBasedHardForkConstraints , protocolInfoShelleyBasedHardFork + -- * Data families + , LedgerTables (..) 
) where import qualified Cardano.Ledger.Api.Transition as L +import qualified Cardano.Ledger.Core as Core import qualified Cardano.Ledger.Era as SL import qualified Cardano.Ledger.Shelley.API as SL import Control.Monad.Except (runExcept) import qualified Data.Map.Strict as Map import Data.SOP.BasicFunctors +import Data.SOP.Functors (Flip (..)) +import Data.SOP.Index (Index (..)) import qualified Data.SOP.InPairs as InPairs -import Data.SOP.Strict +import Data.SOP.Strict (NP (..), NS (..)) import qualified Data.SOP.Tails as Tails import Data.Void (Void) import Lens.Micro ((^.)) +import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block.Forging (BlockForging) import Ouroboros.Consensus.Cardano.CanHardFork (ShelleyPartialLedgerConfig (..), @@ -47,12 +57,13 @@ import Ouroboros.Consensus.Cardano.Node (TriggerHardFork (..)) import Ouroboros.Consensus.HardFork.Combinator import Ouroboros.Consensus.HardFork.Combinator.Embed.Binary import Ouroboros.Consensus.HardFork.Combinator.Serialisation -import qualified Ouroboros.Consensus.HardFork.Combinator.State.Types as HFC +import Ouroboros.Consensus.HardFork.Combinator.State.Types as HFC import qualified Ouroboros.Consensus.HardFork.History as History -import Ouroboros.Consensus.Ledger.Basics (LedgerConfig) +import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Protocol.TPraos @@ -174,6 +185,7 @@ instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 hardForkEraTranslation = EraTranslation { translateLedgerState = PCons translateLedgerState PNil + , translateLedgerTables = PCons translateLedgerTables PNil , translateChainDepState = PCons translateChainDepStateAcrossShelley PNil , crossEraForecast = PCons crossEraForecastAcrossShelley PNil } @@ -181,17 +193,31 @@ instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 translateLedgerState :: InPairs.RequiringBoth WrapLedgerConfig - (HFC.Translate LedgerState) + TranslateLedgerState (ShelleyBlock proto1 era1) (ShelleyBlock proto2 era2) translateLedgerState = InPairs.RequireBoth - $ \_cfg1 cfg2 -> HFC.Translate - $ \_epochNo -> - unComp - . SL.translateEra' - (shelleyLedgerTranslationContext (unwrapLedgerConfig cfg2)) - . Comp + $ \_cfg1 cfg2 -> + HFC.TranslateLedgerState { + translateLedgerStateWith = \_epochNo -> + noNewTickingDiffs + . unFlip + . unComp + . SL.translateEra' + (shelleyLedgerTranslationContext (unwrapLedgerConfig cfg2)) + . Comp + . 
Flip + } + + translateLedgerTables :: + TranslateLedgerTables + (ShelleyBlock proto1 era1) + (ShelleyBlock proto2 era2) + translateLedgerTables = HFC.TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = Core.upgradeTxOut + } hardForkChainSel = Tails.mk2 CompareSameSelectView @@ -238,6 +264,42 @@ instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 latestReleasedNodeVersion = latestReleasedNodeVersionDefault +{------------------------------------------------------------------------------- + Query HF +-------------------------------------------------------------------------------} + +instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 + => BlockSupportsHFLedgerQuery '[ShelleyBlock proto1 era1, ShelleyBlock proto2 era2] where + answerBlockQueryHFLookup idx@IZ cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup idx@(IS IZ) cfg q dlv = + answerShelleyLookupQueries idx cfg q dlv + answerBlockQueryHFLookup (IS (IS idx)) _cfg _q _dlv = + case idx of {} + + answerBlockQueryHFTraverse idx@IZ cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse idx@(IS IZ) cfg q dlv = + answerShelleyTraversingQueries idx cfg q dlv + answerBlockQueryHFTraverse (IS (IS idx)) _cfg _q _dlv = + case idx of {} + + queryLedgerGetTraversingFilter idx@IZ q = case q of + GetUTxOByAddress addrs -> \case + Z (WrapTxOut x) -> filterGetUTxOByAddressOne addrs x + S (Z (WrapTxOut x)) -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter idx@(IS IZ) q = case q of + GetUTxOByAddress addrs -> \case + Z (WrapTxOut x) -> filterGetUTxOByAddressOne addrs x + S (Z (WrapTxOut x)) -> filterGetUTxOByAddressOne addrs x + GetUTxOWhole -> + const True + GetCBOR q' -> queryLedgerGetTraversingFilter idx q' + queryLedgerGetTraversingFilter (IS (IS idx)) _q = case idx of {} + {------------------------------------------------------------------------------- Protocol info -------------------------------------------------------------------------------} @@ -337,3 +399,40 @@ instance ( TxGen (ShelleyBlock proto1 era1) type TxGenExtra (ShelleyBasedHardForkBlock proto1 era1 proto2 era2) = NP WrapTxGenExtra (ShelleyBasedHardForkEras proto1 era1 proto2 era2) testGenTxs = testGenTxsHfc + +{------------------------------------------------------------------------------- + Canonical TxIn +-------------------------------------------------------------------------------} + +instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 + => HasCanonicalTxIn (ShelleyBasedHardForkEras proto1 era1 proto2 era2) where + newtype instance CanonicalTxIn (ShelleyBasedHardForkEras proto1 era1 proto2 era2) = + ShelleyHFCTxIn { + getShelleyHFCTxIn :: SL.TxIn (EraCrypto era1) + } + deriving stock (Show, Eq, Ord) + deriving newtype NoThunks + + injectCanonicalTxIn IZ txIn = ShelleyHFCTxIn txIn + injectCanonicalTxIn (IS IZ) txIn = ShelleyHFCTxIn txIn + injectCanonicalTxIn (IS (IS idx')) _ = case idx' of {} + + distribCanonicalTxIn IZ txIn = getShelleyHFCTxIn txIn + distribCanonicalTxIn (IS IZ) txIn = getShelleyHFCTxIn txIn + distribCanonicalTxIn (IS (IS idx')) _ = case idx' of {} + + encodeCanonicalTxIn = Core.toEraCBOR @era1 . 
getShelleyHFCTxIn + + decodeCanonicalTxIn = ShelleyHFCTxIn <$> Core.fromEraCBOR @era1 + +instance CanHardFork (ShelleyBasedHardForkEras proto1 era1 proto2 era2) + => HasHardForkTxOut (ShelleyBasedHardForkEras proto1 era1 proto2 era2) where + type instance HardForkTxOut (ShelleyBasedHardForkEras proto1 era1 proto2 era2) = + DefaultHardForkTxOut (ShelleyBasedHardForkEras proto1 era1 proto2 era2) + injectHardForkTxOut = injectHardForkTxOutDefault + distribHardForkTxOut = distribHardForkTxOutDefault + +instance ShelleyBasedHardForkConstraints proto1 era1 proto2 era2 + => SerializeHardForkTxOut (ShelleyBasedHardForkEras proto1 era1 proto2 era2) where + encodeHardForkTxOut _ = encodeHardForkTxOutDefault + decodeHardForkTxOut _ = decodeHardForkTxOutDefault diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/TxGen/Cardano.hs b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/TxGen/Cardano.hs index 961a7b5555..cdd0dbd14c 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/TxGen/Cardano.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-testlib/Test/ThreadNet/TxGen/Cardano.hs @@ -27,7 +27,6 @@ import qualified Data.Map.Strict as Map import Data.Maybe (maybeToList) import qualified Data.Sequence.Strict as StrictSeq import qualified Data.Set as Set -import Data.SOP.BasicFunctors import Data.SOP.Strict import Data.SOP.Telescope as Tele import Lens.Micro @@ -38,16 +37,19 @@ import Ouroboros.Consensus.Cardano.Block (CardanoEras, GenTx (..), import Ouroboros.Consensus.Cardano.Node (CardanoHardForkConstraints) import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Combinator.Ledger - (tickedHardForkLedgerStatePerEra) + (getFlipTickedLedgerState, tickedHardForkLedgerStatePerEra) import Ouroboros.Consensus.HardFork.Combinator.State.Types (currentState, getHardForkState) import Ouroboros.Consensus.Ledger.Basics (LedgerConfig, LedgerState, - applyChainTick) + TickedLedgerState, applyChainTick) +import Ouroboros.Consensus.Ledger.Tables (ValuesMK) +import Ouroboros.Consensus.Ledger.Tables.Utils (applyDiffs, + forgetLedgerTables) import Ouroboros.Consensus.NodeId (CoreNodeId (..)) import Ouroboros.Consensus.Protocol.TPraos (TPraos) import Ouroboros.Consensus.Shelley.Ledger (ShelleyBlock, mkShelleyTx) -import Ouroboros.Consensus.Shelley.Ledger.Ledger (Ticked, - tickedShelleyLedgerState) +import Ouroboros.Consensus.Shelley.Ledger.Ledger + (tickedShelleyLedgerState) import qualified Test.Cardano.Ledger.Core.KeyPair as TL (mkWitnessVKey) import qualified Test.ThreadNet.Infra.Shelley as Shelley import Test.ThreadNet.TxGen @@ -128,7 +130,7 @@ migrateUTxO :: => MigrationInfo c -> SlotNo -> LedgerConfig (CardanoBlock c) - -> LedgerState (CardanoBlock c) + -> LedgerState (CardanoBlock c) ValuesMK -> Maybe (GenTx (CardanoBlock c)) migrateUTxO migrationInfo curSlot lcfg lst | Just utxo <- mbUTxO = @@ -209,10 +211,12 @@ migrateUTxO migrationInfo curSlot lcfg lst where mbUTxO :: Maybe (SL.UTxO (ShelleyEra c)) mbUTxO = - fmap getUTxOShelley $ - ejectShelleyTickedLedgerState $ - applyChainTick lcfg curSlot $ - lst + fmap getUTxOShelley + . ejectShelleyTickedLedgerState + . applyDiffs lst + . applyChainTick lcfg curSlot + . 
forgetLedgerTables + $ lst MigrationInfo { byronMagic @@ -259,7 +263,7 @@ ejectShelleyNS = \case S (Z x) -> Just x _ -> Nothing -getUTxOShelley :: Ticked (LedgerState (ShelleyBlock proto era)) +getUTxOShelley :: TickedLedgerState (ShelleyBlock proto era) mk -> SL.UTxO era getUTxOShelley tls = SL.utxosUtxo $ @@ -269,10 +273,10 @@ getUTxOShelley tls = tickedShelleyLedgerState tls ejectShelleyTickedLedgerState :: - Ticked (LedgerState (CardanoBlock c)) - -> Maybe (Ticked (LedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)))) + TickedLedgerState (CardanoBlock c) mk + -> Maybe (TickedLedgerState (ShelleyBlock (TPraos c) (ShelleyEra c)) mk) ejectShelleyTickedLedgerState ls = - fmap (unComp . currentState) $ + fmap (getFlipTickedLedgerState . currentState) $ ejectShelleyNS $ Tele.tip $ getHardForkState $ diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Api/Protocol/Types.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Api/Protocol/Types.hs index 84b2f11eaf..372ff0a087 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Api/Protocol/Types.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Api/Protocol/Types.hs @@ -32,6 +32,7 @@ import Ouroboros.Consensus.Node.ProtocolInfo (ProtocolClientInfo (..), import Ouroboros.Consensus.Node.Run (RunNode) import qualified Ouroboros.Consensus.Protocol.TPraos as Consensus import qualified Ouroboros.Consensus.Shelley.Eras as Consensus (ShelleyEra) +import Ouroboros.Consensus.Shelley.HFEras () import qualified Ouroboros.Consensus.Shelley.Ledger.Block as Consensus (ShelleyBlock) import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs index b96b2226ba..6553b3b1d5 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs @@ -1,5 +1,6 @@ {-# LANGUAGE BangPatterns #-} {-# LANGUAGE BlockArguments #-} +{-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} @@ -33,11 +34,13 @@ import Cardano.Tools.DBAnalyser.CSV (computeAndWriteLine, import Cardano.Tools.DBAnalyser.HasAnalysis (HasAnalysis) import qualified Cardano.Tools.DBAnalyser.HasAnalysis as HasAnalysis import Cardano.Tools.DBAnalyser.Types -import Codec.CBOR.Encoding (Encoding) import Control.Monad (unless, void, when) import Control.Monad.Except (runExcept) import Control.ResourceRegistry import Control.Tracer (Tracer (..), nullTracer, traceWith) +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable (foldl') +#endif import Data.Int (Int64) import Data.List (intercalate) import qualified Data.Map.Strict as Map @@ -53,31 +56,28 @@ import Ouroboros.Consensus.HeaderValidation (HasAnnTip (..), HeaderState (..), headerStatePoint, revalidateHeader, tickHeaderState, validateHeader) import Ouroboros.Consensus.Ledger.Abstract - (ApplyBlock (reapplyBlockLedgerResult), LedgerCfg, - LedgerConfig, applyBlockLedgerResult, applyChainTick, - tickThenApply, tickThenApplyLedgerResult, tickThenReapply) -import Ouroboros.Consensus.Ledger.Basics (LedgerResult (..), - LedgerState, getTipSlot) + (ApplyBlock (getBlockKeySets), applyBlockLedgerResult, + reapplyBlockLedgerResult, tickThenApply, + tickThenApplyLedgerResult, tickThenReapply) +import 
Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.SupportsMempool (LedgerSupportsMempool) import qualified Ouroboros.Consensus.Ledger.SupportsMempool as LedgerSupportsMempool import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol (..)) +import Ouroboros.Consensus.Ledger.Tables.Utils import qualified Ouroboros.Consensus.Mempool as Mempool import Ouroboros.Consensus.Protocol.Abstract (LedgerView) -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB - (LgrDbSerialiseConstraints) import Ouroboros.Consensus.Storage.Common (BlockComponent (..)) import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB -import Ouroboros.Consensus.Storage.LedgerDB (DiskSnapshot (..), - writeSnapshot) -import Ouroboros.Consensus.Storage.Serialisation (encodeDisk) -import Ouroboros.Consensus.Util ((..:)) +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots as LedgerDB +import Ouroboros.Consensus.Ticked import qualified Ouroboros.Consensus.Util.IOLike as IOLike +import Ouroboros.Network.Protocol.LocalStateQuery.Type import Ouroboros.Network.SizeInBytes -import System.FS.API (SomeHasFS (..)) import qualified System.IO as IO {------------------------------------------------------------------------------- @@ -91,7 +91,7 @@ runAnalysis :: , LedgerSupportsMempool.HasTxs blk , LedgerSupportsMempool blk , LedgerSupportsProtocol blk - , LgrDbSerialiseConstraints blk + , CanStowLedgerTables (LedgerState blk) ) => AnalysisName -> SomeAnalysis blk runAnalysis analysisName = case go analysisName of @@ -128,13 +128,12 @@ data SomeAnalysis blk = => SomeAnalysis (Proxy startFrom) (Analysis blk startFrom) data AnalysisEnv m blk startFrom = AnalysisEnv { - cfg :: TopLevelConfig blk - , startFrom :: AnalysisStartFrom blk startFrom - , db :: ImmutableDB IO blk - , registry :: ResourceRegistry IO - , ledgerDbFS :: SomeHasFS IO - , limit :: Limit - , tracer :: Tracer m (TraceEvent blk) + cfg :: TopLevelConfig blk + , startFrom :: AnalysisStartFrom m blk startFrom + , db :: ImmutableDB IO blk + , registry :: ResourceRegistry IO + , limit :: Limit + , tracer :: Tracer m (TraceEvent blk) } -- | Whether the db-analyser pass needs access to a ledger state. @@ -148,16 +147,16 @@ type instance Sing = SStartFrom instance SingI StartFromPoint where sing = SStartFromPoint instance SingI StartFromLedgerState where sing = SStartFromLedgerState -data AnalysisStartFrom blk startFrom where +data AnalysisStartFrom m blk startFrom where FromPoint :: - Point blk -> AnalysisStartFrom blk StartFromPoint + Point blk -> AnalysisStartFrom m blk StartFromPoint FromLedgerState :: - ExtLedgerState blk -> AnalysisStartFrom blk StartFromLedgerState + LedgerDB.LedgerDB' m blk -> LedgerDB.TestInternals' m blk -> AnalysisStartFrom m blk StartFromLedgerState -startFromPoint :: HasAnnTip blk => AnalysisStartFrom blk startFrom -> Point blk +startFromPoint :: (IOLike.IOLike m, HasAnnTip blk) => AnalysisStartFrom m blk startFrom -> m (Point blk) startFromPoint = \case - FromPoint pt -> pt - FromLedgerState st -> headerStatePoint $ headerState st + FromPoint pt -> pure pt + FromLedgerState st _ -> headerStatePoint . 
headerState <$> IOLike.atomically (LedgerDB.getVolatileTip st) data TraceEvent blk = StartedEvent AnalysisName @@ -376,33 +375,40 @@ showEBBs AnalysisEnv { db, registry, startFrom, limit, tracer } = do storeLedgerStateAt :: forall blk . - ( LgrDbSerialiseConstraints blk + ( LedgerSupportsProtocol blk +#if __GLASGOW_HASKELL__ > 810 , HasAnalysis blk - , LedgerSupportsProtocol blk +#endif ) => SlotNo -> LedgerApplicationMode -> Analysis blk StartFromLedgerState storeLedgerStateAt slotNo ledgerAppMode env = do - void $ processAllUntil db registry GetBlock startFrom limit initLedger process + void $ processAllUntil db registry GetBlock startFrom limit () process pure Nothing where - AnalysisEnv { db, registry, startFrom, cfg, limit, ledgerDbFS, tracer } = env - FromLedgerState initLedger = startFrom + AnalysisEnv { db, registry, startFrom, cfg, limit, tracer } = env + FromLedgerState initLedgerDB internal = startFrom - process :: ExtLedgerState blk -> blk -> IO (NextStep, ExtLedgerState blk) - process oldLedger blk = do + process :: () -> blk -> IO (NextStep, ()) + process _ blk = do let ledgerCfg = ExtLedgerCfg cfg - case runExcept $ tickThenXApply ledgerCfg blk oldLedger of + oldLedger <- IOLike.atomically $ LedgerDB.getVolatileTip initLedgerDB + frk <- LedgerDB.getForkerAtWellKnownPoint initLedgerDB registry VolatileTip + tbs <- LedgerDB.forkerReadTables frk (getBlockKeySets blk) + LedgerDB.forkerClose frk + case runExcept $ tickThenXApply ledgerCfg blk (oldLedger `withLedgerTables` tbs) of Right newLedger -> do when (blockSlot blk >= slotNo) $ storeLedgerState newLedger when (blockSlot blk > slotNo) $ issueWarning blk when ((unBlockNo $ blockNo blk) `mod` 1000 == 0) $ reportProgress blk - return (continue blk, newLedger) + LedgerDB.reapplyThenPushNOW internal blk + LedgerDB.tryFlush initLedgerDB + return (continue blk, ()) Left err -> do traceWith tracer $ LedgerErrorEvent (blockPoint blk) err - storeLedgerState oldLedger - pure (Stop, oldLedger) + storeLedgerState (oldLedger `withLedgerTables` tbs) + pure (Stop, ()) tickThenXApply = case ledgerAppMode of LedgerReapply -> pure ..: tickThenReapply @@ -418,24 +424,16 @@ storeLedgerStateAt slotNo ledgerAppMode env = do reportProgress blk = let event = BlockSlotEvent (blockNo blk) (blockSlot blk) (blockHash blk) in traceWith tracer event - storeLedgerState :: ExtLedgerState blk -> IO () + storeLedgerState :: ExtLedgerState blk mk -> IO () storeLedgerState ledgerState = case pointSlot pt of NotOrigin slot -> do - let snapshot = DiskSnapshot (unSlotNo slot) (Just "db-analyser") - writeSnapshot ledgerDbFS encLedger snapshot ledgerState + let snapshot = LedgerDB.DiskSnapshot (unSlotNo slot) (Just "db-analyser") + LedgerDB.takeSnapshotNOW internal (Just snapshot) traceWith tracer $ SnapshotStoredEvent slot Origin -> pure () where pt = headerStatePoint $ headerState ledgerState - encLedger :: ExtLedgerState blk -> Encoding - encLedger = - let ccfg = configCodec cfg - in encodeExtLedgerState - (encodeDisk ccfg) - (encodeDisk ccfg) - (encodeDisk ccfg) - countBlocks :: forall blk . ( HasAnalysis blk @@ -455,7 +453,8 @@ countBlocks (AnalysisEnv { db, registry, startFrom, limit, tracer }) = do checkNoThunksEvery :: forall blk. 
( HasAnalysis blk, - LedgerSupportsProtocol blk + LedgerSupportsProtocol blk, + CanStowLedgerTables (LedgerState blk) ) => Word64 -> Analysis blk StartFromLedgerState @@ -464,21 +463,39 @@ checkNoThunksEvery (AnalysisEnv {db, registry, startFrom, cfg, limit}) = do putStrLn $ "Checking for thunks in each block where blockNo === 0 (mod " <> show nBlocks <> ")." - void $ processAll db registry GetBlock startFrom limit initLedger process + void $ processAll db registry GetBlock startFrom limit () process pure Nothing where - FromLedgerState initLedger = startFrom - - process :: ExtLedgerState blk -> blk -> IO (ExtLedgerState blk) - process oldLedger blk = do + FromLedgerState ldb internal = startFrom + + process :: () -> blk -> IO () + process _ blk = do + oldLedger <- IOLike.atomically $ LedgerDB.getVolatileTip ldb + frk <- LedgerDB.getForkerAtWellKnownPoint ldb registry VolatileTip + tbs <- LedgerDB.forkerReadTables frk (getBlockKeySets blk) + LedgerDB.forkerClose frk + let oldLedger' = oldLedger `withLedgerTables` tbs let ledgerCfg = ExtLedgerCfg cfg - appliedResult = tickThenApplyLedgerResult ledgerCfg blk oldLedger - newLedger = either (error . show) lrResult $ runExcept $ appliedResult + appliedResult = tickThenApplyLedgerResult ledgerCfg blk oldLedger' + newLedger = either (error . show) lrResult $ runExcept appliedResult + newLedger' = applyDiffs oldLedger' newLedger bn = blockNo blk - when (unBlockNo bn `mod` nBlocks == 0 ) $ IOLike.evaluate (ledgerState newLedger) >>= checkNoThunks bn - return newLedger - - checkNoThunks :: BlockNo -> LedgerState blk -> IO () + when (unBlockNo bn `mod` nBlocks == 0 ) $ do + -- Check the new ledger state with new values stowed. This checks that + -- the ledger has no thunks in their ledgerstate type. + IOLike.evaluate (stowLedgerTables $ ledgerState newLedger') >>= checkNoThunks bn + -- Check the new ledger state with diffs in the tables. This should + -- catch any additional thunks in the diffs tables. + IOLike.evaluate (ledgerState newLedger) >>= checkNoThunks bn + -- Check the new ledger state with values in the ledger tables. This + -- should catch any additional thunks in the values tables. + IOLike.evaluate (ledgerState newLedger') >>= checkNoThunks bn + + LedgerDB.reapplyThenPushNOW internal blk + LedgerDB.tryFlush ldb + + + checkNoThunks :: NoThunksMK mk => BlockNo -> LedgerState blk mk -> IO () checkNoThunks bn ls = noThunks ["--checkThunks"] ls >>= \case Nothing -> putStrLn $ show bn <> ": no thunks found." 
@@ -499,24 +516,35 @@ traceLedgerProcessing :: Analysis blk StartFromLedgerState traceLedgerProcessing (AnalysisEnv {db, registry, startFrom, cfg, limit}) = do - void $ processAll db registry GetBlock startFrom limit initLedger process + void $ processAll db registry GetBlock startFrom limit () (process initLedger internal) pure Nothing where - FromLedgerState initLedger = startFrom + FromLedgerState initLedger internal = startFrom process - :: ExtLedgerState blk + :: LedgerDB.LedgerDB' IO blk + -> LedgerDB.TestInternals' IO blk + -> () -> blk - -> IO (ExtLedgerState blk) - process oldLedger blk = do + -> IO () + process ledgerDB intLedgerDB _ blk = do + frk <- LedgerDB.getForkerAtWellKnownPoint ledgerDB registry VolatileTip + oldLedgerSt <- IOLike.atomically $ LedgerDB.forkerGetLedgerState frk + oldLedgerTbs <- LedgerDB.forkerReadTables frk (getBlockKeySets blk) + let oldLedger = oldLedgerSt `withLedgerTables` oldLedgerTbs + LedgerDB.forkerClose frk + let ledgerCfg = ExtLedgerCfg cfg appliedResult = tickThenApplyLedgerResult ledgerCfg blk oldLedger - newLedger = either (error . show) lrResult $ runExcept $ appliedResult + newLedger = either (error . show) lrResult $ runExcept appliedResult + newLedger' = applyDiffs oldLedger newLedger traces = (HasAnalysis.emitTraces $ - HasAnalysis.WithLedgerState blk (ledgerState oldLedger) (ledgerState newLedger)) + HasAnalysis.WithLedgerState blk (ledgerState oldLedger) (ledgerState newLedger')) mapM_ Debug.traceMarkerIO traces - return $ newLedger + + LedgerDB.reapplyThenPushNOW intLedgerDB blk + LedgerDB.tryFlush ledgerDB {------------------------------------------------------------------------------- Analysis: maintain a ledger state and time the five major ledger calculations @@ -535,10 +563,11 @@ traceLedgerProcessing - Block validation. -------------------------------------------------------------------------------} + benchmarkLedgerOps :: - forall blk. - ( HasAnalysis blk - , LedgerSupportsProtocol blk + forall blk. + ( LedgerSupportsProtocol blk + , HasAnalysis blk ) => Maybe FilePath -> LedgerApplicationMode @@ -557,22 +586,28 @@ benchmarkLedgerOps mOutfile ledgerAppMode AnalysisEnv {db, registry, startFrom, ((,) <$> GetBlock <*> GetBlockSize) startFrom limit - initLedger - (process outFileHandle outFormat) + () + (process initLedger initial outFileHandle outFormat) pure Nothing where ccfg = topLevelConfigProtocol cfg lcfg = topLevelConfigLedger cfg - FromLedgerState initLedger = startFrom + FromLedgerState initLedger initial = startFrom process :: - IO.Handle + LedgerDB.LedgerDB' IO blk + -> LedgerDB.TestInternals' IO blk + -> IO.Handle -> F.OutputFormat - -> ExtLedgerState blk + -> () -> (blk, SizeInBytes) - -> IO (ExtLedgerState blk) - process outFileHandle outFormat prevLedgerState (blk, sz) = do + -> IO () + process ledgerDB intLedgerDB outFileHandle outFormat _ (blk, sz) = do + (prevLedgerState, tables) <- LedgerDB.withPrivateTipForker ledgerDB $ \frk -> do + st <- IOLike.atomically $ LedgerDB.forkerGetLedgerState frk + tbs <- LedgerDB.forkerReadTables frk (getBlockKeySets blk) + pure (st, tbs) prevRtsStats <- GC.getRTSStats let -- Compute how many nanoseconds the mutator used from the last @@ -590,9 +625,10 @@ benchmarkLedgerOps mOutfile ledgerAppMode AnalysisEnv {db, registry, startFrom, -- 'time' takes care of forcing the evaluation of its argument's result. 
(ldgrView, tForecast) <- time $ forecast slot prevLedgerState (tkHdrSt, tHdrTick) <- time $ tickTheHeaderState slot prevLedgerState ldgrView - (hdrSt', tHdrApp) <- time $ applyTheHeader ldgrView tkHdrSt + (!_, tHdrApp) <- time $ applyTheHeader ldgrView tkHdrSt (tkLdgrSt, tBlkTick) <- time $ tickTheLedgerState slot prevLedgerState - (ldgrSt', tBlkApp) <- time $ applyTheBlock tkLdgrSt + let !tkLdgrSt' = applyDiffs (prevLedgerState `withLedgerTables` tables) tkLdgrSt + (!_, tBlkApp) <- time $ applyTheBlock tkLdgrSt' currentRtsStats <- GC.getRTSStats let @@ -624,13 +660,16 @@ benchmarkLedgerOps mOutfile ledgerAppMode AnalysisEnv {db, registry, startFrom, F.writeDataPoint outFileHandle outFormat slotDataPoint - pure $ ExtLedgerState ldgrSt' hdrSt' + LedgerDB.reapplyThenPushNOW intLedgerDB blk + LedgerDB.tryFlush ledgerDB + + pure () where rp = blockRealPoint blk forecast :: SlotNo - -> ExtLedgerState blk + -> ExtLedgerState blk mk -> IO (LedgerView (BlockProtocol blk)) forecast slot st = do let forecaster = ledgerViewForecastAt lcfg (ledgerState st) @@ -640,7 +679,7 @@ benchmarkLedgerOps mOutfile ledgerAppMode AnalysisEnv {db, registry, startFrom, tickTheHeaderState :: SlotNo - -> ExtLedgerState blk + -> ExtLedgerState blk mk -> LedgerView (BlockProtocol blk) -> IO (Ticked (HeaderState blk)) tickTheHeaderState slot st ledgerView = @@ -663,14 +702,14 @@ benchmarkLedgerOps mOutfile ledgerAppMode AnalysisEnv {db, registry, startFrom, tickTheLedgerState :: SlotNo - -> ExtLedgerState blk - -> IO (Ticked (LedgerState blk)) + -> ExtLedgerState blk EmptyMK + -> IO (Ticked1 (LedgerState blk) DiffMK) tickTheLedgerState slot st = pure $ applyChainTick lcfg slot (ledgerState st) applyTheBlock :: - Ticked (LedgerState blk) - -> IO (LedgerState blk) + TickedLedgerState blk ValuesMK + -> IO (LedgerState blk DiffMK) applyTheBlock tickedLedgerSt = case ledgerAppMode of LedgerApply -> case runExcept (lrResult <$> applyBlockLedgerResult lcfg blk tickedLedgerSt) of @@ -696,22 +735,34 @@ getBlockApplicationMetrics :: getBlockApplicationMetrics (NumberOfBlocks nrBlocks) mOutFile env = do withFile mOutFile $ \outFileHandle -> do writeHeaderLine outFileHandle separator (HasAnalysis.blockApplicationMetrics @blk) - void $ processAll db registry GetBlock startFrom limit initLedger (process outFileHandle) + void $ processAll db registry GetBlock startFrom limit () (process initLedger internal outFileHandle) pure Nothing where separator = ", " AnalysisEnv {db, registry, startFrom, cfg, limit } = env - FromLedgerState initLedger = startFrom + FromLedgerState initLedger internal = startFrom - process :: IO.Handle -> ExtLedgerState blk -> blk -> IO (ExtLedgerState blk) - process outFileHandle currLedgerSt blk = do - let nextLedgerSt = tickThenReapply (ExtLedgerCfg cfg) blk currLedgerSt + process :: + LedgerDB.LedgerDB' IO blk + -> LedgerDB.TestInternals' IO blk + -> IO.Handle + -> () + -> blk + -> IO () + process ledgerDB intLedgerDB outFileHandle _ blk = do + frk <- LedgerDB.getForkerAtWellKnownPoint ledgerDB registry VolatileTip + oldLedgerSt <- IOLike.atomically $ LedgerDB.forkerGetLedgerState frk + oldLedgerTbs <- LedgerDB.forkerReadTables frk (getBlockKeySets blk) + let oldLedger = oldLedgerSt `withLedgerTables` oldLedgerTbs + LedgerDB.forkerClose frk + + let nextLedgerSt = tickThenReapply (ExtLedgerCfg cfg) blk oldLedger when (unBlockNo (blockNo blk) `mod` nrBlocks == 0) $ do let blockApplication = HasAnalysis.WithLedgerState blk - (ledgerState currLedgerSt) - (ledgerState nextLedgerSt) + (ledgerState oldLedger) + 
(ledgerState $ applyDiffs oldLedger nextLedgerSt) computeAndWriteLine outFileHandle separator @@ -720,7 +771,10 @@ getBlockApplicationMetrics (NumberOfBlocks nrBlocks) mOutFile env = do IO.hFlush outFileHandle - return nextLedgerSt + LedgerDB.reapplyThenPushNOW intLedgerDB blk + LedgerDB.tryFlush ledgerDB + + pure () {------------------------------------------------------------------------------- Analysis: reforge the blocks, via the mempool @@ -745,10 +799,18 @@ reproMempoolForge numBlks env = do _ -> fail $ "--repro-mempool-and-forge only supports" <> "1 or 2 blocks at a time, not " <> show numBlks - ref <- IOLike.newTVarIO initLedger mempool <- Mempool.openMempoolWithoutSyncThread Mempool.LedgerInterface { - Mempool.getCurrentLedgerState = ledgerState <$> IOLike.readTVar ref + Mempool.getCurrentLedgerState = ledgerState <$> LedgerDB.getVolatileTip ledgerDB + , Mempool.getLedgerTablesAtFor = \pt txs -> do + frk <- LedgerDB.getForkerAtPoint ledgerDB registry pt + case frk of + Left _ -> pure Nothing + Right fr -> do + tbs <- Just . castLedgerTables <$> LedgerDB.forkerReadTables fr (castLedgerTables $ foldl' (<>) emptyLedgerTables $ map LedgerSupportsMempool.getTransactionKeySets txs) + LedgerDB.forkerClose fr + pure tbs + } lCfg -- one mebibyte should generously accomodate two blocks' worth of txs @@ -758,12 +820,12 @@ reproMempoolForge numBlks env = do ) nullTracer - void $ processAll db registry GetBlock startFrom limit Nothing (process howManyBlocks ref mempool) + void $ processAll db registry GetBlock startFrom limit Nothing (process howManyBlocks mempool) pure Nothing where AnalysisEnv { cfg - , startFrom = startFrom@(FromLedgerState initLedger) + , startFrom = startFrom@(FromLedgerState ledgerDB intLedgerDB) , db , registry , limit @@ -773,9 +835,6 @@ reproMempoolForge numBlks env = do lCfg :: LedgerConfig blk lCfg = configLedger cfg - elCfg :: LedgerCfg (ExtLedgerState blk) - elCfg = ExtLedgerCfg cfg - timed :: IO a -> IO (a, IOLike.DiffTime, Int64, Int64) timed m = do before <- IOLike.getMonotonicTime @@ -791,12 +850,11 @@ reproMempoolForge numBlks env = do process :: ReproMempoolForgeHowManyBlks - -> IOLike.StrictTVar IO (ExtLedgerState blk) -> Mempool.Mempool IO blk -> Maybe blk -> blk -> IO (Maybe blk) - process howManyBlocks ref mempool mbBlk blk' = (\() -> Just blk') <$> do + process howManyBlocks mempool mbBlk blk' = (\() -> Just blk') <$> do -- add this block's transactions to the mempool do results <- Mempool.addTxs mempool $ LedgerSupportsMempool.extractTxs blk' @@ -820,20 +878,21 @@ reproMempoolForge numBlks env = do case scrutinee of Nothing -> pure () Just blk -> do - st <- IOLike.readTVarIO ref + LedgerDB.withPrivateTipForker ledgerDB $ \forker -> do + st <- IOLike.atomically $ LedgerDB.forkerGetLedgerState forker - -- time the suspected slow parts of the forge thread that created - -- this block - -- - -- Primary caveat: that thread's mempool may have had more transactions in it. - do + -- time the suspected slow parts of the forge thread that created + -- this block + -- + -- Primary caveat: that thread's mempool may have had more transactions in it. 
let slot = blockSlot blk (ticked, durTick, mutTick, gcTick) <- timed $ IOLike.evaluate $ - applyChainTick lCfg slot (ledgerState st) - ((), durSnap, mutSnap, gcSnap) <- timed $ IOLike.atomically $ do - snap <- Mempool.getSnapshotFor mempool $ Mempool.ForgeInKnownSlot slot ticked + applyChainTick lCfg slot (ledgerState st) + ((), durSnap, mutSnap, gcSnap) <- timed $ do + snap <- Mempool.getSnapshotFor mempool slot ticked $ + fmap castLedgerTables . LedgerDB.forkerReadTables forker . castLedgerTables - pure $ length (Mempool.snapshotTxs snap) `seq` Mempool.snapshotLedgerState snap `seq` () + pure $ length (Mempool.snapshotTxs snap) `seq` Mempool.snapshotState snap `seq` () let sizes = HasAnalysis.blockTxSizes blk traceWith tracer $ @@ -857,7 +916,8 @@ reproMempoolForge numBlks env = do -- since it currently matches the call in the forging thread, which is -- the primary intention of this Analysis. Maybe GHC's CSE is already -- doing this sharing optimization? - IOLike.atomically $ IOLike.writeTVar ref $! tickThenReapply elCfg blk st + LedgerDB.reapplyThenPushNOW intLedgerDB blk + LedgerDB.tryFlush ledgerDB -- this flushes blk from the mempool, since every tx in it is now on the chain void $ Mempool.syncWithLedger mempool @@ -879,17 +939,18 @@ processAllUntil :: => ImmutableDB IO blk -> ResourceRegistry IO -> BlockComponent blk b - -> AnalysisStartFrom blk startFrom + -> AnalysisStartFrom IO blk startFrom -> Limit -> st -> (st -> b -> IO (NextStep, st)) -> IO st processAllUntil immutableDB registry blockComponent startFrom limit initState callback = do + st <- startFromPoint startFrom itr <- ImmutableDB.streamAfterKnownPoint immutableDB registry blockComponent - (startFromPoint startFrom) + st go itr limit initState where go :: ImmutableDB.Iterator IO blk b -> Limit -> st -> IO st @@ -908,7 +969,7 @@ processAll :: => ImmutableDB IO blk -> ResourceRegistry IO -> BlockComponent blk b - -> AnalysisStartFrom blk startFrom + -> AnalysisStartFrom IO blk startFrom -> Limit -> st -> (st -> b -> IO st) @@ -923,7 +984,7 @@ processAll_ :: => ImmutableDB IO blk -> ResourceRegistry IO -> BlockComponent blk b - -> AnalysisStartFrom blk startFrom + -> AnalysisStartFrom IO blk startFrom -> Limit -> (b -> IO ()) -> IO () diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Block/Cardano.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Block/Cardano.hs index 341f0bcc08..c8896cf58d 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Block/Cardano.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Block/Cardano.hs @@ -50,6 +50,7 @@ import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (fromJust) import Data.SOP.BasicFunctors +import Data.SOP.Functors import Data.SOP.Strict import qualified Data.SOP.Telescope as Telescope import Data.String (IsString (..)) @@ -108,15 +109,15 @@ analyseWithLedgerState f (WithLedgerState cb sb sa) = p :: Proxy HasAnalysis p = Proxy - zipLS (Comp (Just sb')) (Comp (Just sa')) (I blk) = + zipLS (Comp (Just (Flip sb'))) (Comp (Just (Flip sa'))) (I blk) = Comp . Just $ WithLedgerState blk sb' sa' zipLS _ _ _ = Comp Nothing oeb = getOneEraBlock . 
getHardForkBlock $ cb goLS :: - LedgerState (CardanoBlock StandardCrypto) -> - NP (Maybe :.: LedgerState) (CardanoEras StandardCrypto) + LedgerState (CardanoBlock StandardCrypto) mk -> + NP (Maybe :.: Flip LedgerState mk) (CardanoEras StandardCrypto) goLS = hexpand (Comp Nothing) . hmap (Comp . Just . currentState) @@ -300,9 +301,9 @@ instance (HasAnnTip (CardanoBlock StandardCrypto), GetPrevHash (CardanoBlock Sta ] dispatch :: - LedgerState (CardanoBlock StandardCrypto) - -> (LedgerState ByronBlock -> IO Builder) - -> (forall proto era. LedgerState (ShelleyBlock proto era) -> IO Builder) + LedgerState (CardanoBlock StandardCrypto) ValuesMK + -> (LedgerState ByronBlock ValuesMK -> IO Builder) + -> (forall proto era. LedgerState (ShelleyBlock proto era) ValuesMK -> IO Builder) -> IO Builder dispatch cardanoSt fByron fShelley = hcollapse $ @@ -317,22 +318,22 @@ dispatch cardanoSt fByron fShelley = ) (hardForkLedgerStatePerEra cardanoSt) where - k_fByron = K . fByron + k_fByron = K . fByron . unFlip k_fShelley :: forall proto era. - LedgerState (ShelleyBlock proto era) + Flip LedgerState ValuesMK (ShelleyBlock proto era) -> K (IO Builder) (ShelleyBlock proto era) - k_fShelley = K . fShelley + k_fShelley = K . fShelley . unFlip applyToByronUtxo :: (Map Byron.UTxO.CompactTxIn Byron.UTxO.CompactTxOut -> IO Builder) - -> LedgerState ByronBlock + -> LedgerState ByronBlock ValuesMK -> IO Builder applyToByronUtxo f st = f $ getByronUtxo st -getByronUtxo :: LedgerState ByronBlock +getByronUtxo :: LedgerState ByronBlock ValuesMK -> Map Byron.UTxO.CompactTxIn Byron.UTxO.CompactTxOut getByronUtxo = Byron.UTxO.unUTxO . Byron.Block.cvsUtxo @@ -340,13 +341,13 @@ getByronUtxo = Byron.UTxO.unUTxO applyToShelleyBasedUtxo :: (Map (TxIn (Cardano.Block.EraCrypto era)) (TxOut era) -> IO Builder) - -> LedgerState (ShelleyBlock proto era) + -> LedgerState (ShelleyBlock proto era) ValuesMK -> IO Builder applyToShelleyBasedUtxo f st = do f $ getShelleyBasedUtxo st getShelleyBasedUtxo :: - LedgerState (ShelleyBlock proto era) + LedgerState (ShelleyBlock proto era) ValuesMK -> Map (TxIn (Cardano.Block.EraCrypto era)) (TxOut era) getShelleyBasedUtxo = (\(Shelley.UTxO.UTxO xs)-> xs) . 
Shelley.LedgerState.utxosUtxo diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/HasAnalysis.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/HasAnalysis.hs index d16395d5ac..f8b6074552 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/HasAnalysis.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/HasAnalysis.hs @@ -23,8 +23,8 @@ import Text.Builder (Builder) data WithLedgerState blk = WithLedgerState { wlsBlk :: blk - , wlsStateBefore :: LedgerState blk - , wlsStateAfter :: LedgerState blk + , wlsStateBefore :: LedgerState blk ValuesMK + , wlsStateAfter :: LedgerState blk ValuesMK } class (HasAnnTip blk, GetPrevHash blk, Condense (HeaderHash blk)) => HasAnalysis blk where diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Run.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Run.hs index f896ef4e01..72aeb07b29 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Run.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Run.hs @@ -10,36 +10,79 @@ module Cardano.Tools.DBAnalyser.Run (analyse) where import Cardano.Tools.DBAnalyser.Analysis import Cardano.Tools.DBAnalyser.HasAnalysis import Cardano.Tools.DBAnalyser.Types -import Codec.Serialise (Serialise (decode)) -import Control.Monad.Except (runExceptT) import Control.ResourceRegistry import Control.Tracer (Tracer (..), nullTracer) import Data.Singletons (Sing, SingI (..)) +import qualified Data.SOP.Dict as Dict import qualified Debug.Trace as Debug import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config -import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.HardFork.Abstract +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Inspect import qualified Ouroboros.Consensus.Ledger.SupportsMempool as LedgerSupportsMempool (HasTxs) +import Ouroboros.Consensus.Ledger.SupportsProtocol import qualified Ouroboros.Consensus.Node as Node import qualified Ouroboros.Consensus.Node.InitStorage as Node import Ouroboros.Consensus.Node.ProtocolInfo (ProtocolInfo (..)) import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB -import Ouroboros.Consensus.Storage.ChainDB.Impl.Args -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB (lgrHasFS) +import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB -import Ouroboros.Consensus.Storage.LedgerDB (DiskSnapshot (..), - readSnapshot) +import qualified Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream as ImmutableDB +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Args as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Init as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.Args as LedgerDB.V1 +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API as BS +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB as LMDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.Init as LedgerDB.V1 +import qualified Ouroboros.Consensus.Storage.LedgerDB.V2.Args as LedgerDB.V2 +import Ouroboros.Consensus.Util.Args import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.Orphans () +import 
Ouroboros.Network.Block (genesisPoint) import System.IO import Text.Printf (printf) - {------------------------------------------------------------------------------- Analyse -------------------------------------------------------------------------------} +openLedgerDB :: + ( LedgerSupportsProtocol blk + , InspectLedger blk + , LedgerDB.LedgerDbSerialiseConstraints blk + , HasHardForkHistory blk + ) + => Complete LedgerDB.LedgerDbArgs IO blk + -> IO ( LedgerDB.LedgerDB' IO blk + , LedgerDB.TestInternals' IO blk + ) +openLedgerDB lgrDbArgs@LedgerDB.LedgerDbArgs{LedgerDB.lgrFlavorArgs=LedgerDB.LedgerDbFlavorArgsV1 bss} = do + (ledgerDB, _, intLedgerDB) <- + LedgerDB.openDBInternal + lgrDbArgs + (LedgerDB.V1.mkInitDb + lgrDbArgs + bss + (\_ -> error "no replay")) + emptyStream + genesisPoint + pure (ledgerDB, intLedgerDB) +openLedgerDB LedgerDB.LedgerDbArgs{LedgerDB.lgrFlavorArgs=LedgerDB.LedgerDbFlavorArgsV2{}} = + error "not defined for v2, use v1 instead for now!" + +emptyStream :: Applicative m => ImmutableDB.StreamAPI m blk a +emptyStream = ImmutableDB.StreamAPI $ \_ k -> k $ Right $ pure ImmutableDB.NoMoreItems + +defaultLMDBLimits :: LMDB.LMDBLimits +defaultLMDBLimits = LMDB.LMDBLimits + { LMDB.lmdbMapSize = 16 * 1024 * 1024 * 1024 + , LMDB.lmdbMaxDatabases = 10 + , LMDB.lmdbMaxReaders = 16 + } + analyse :: forall blk . ( Node.RunNode blk @@ -47,32 +90,40 @@ analyse :: , HasAnalysis blk , HasProtocolInfo blk , LedgerSupportsMempool.HasTxs blk + , CanStowLedgerTables (LedgerState blk) ) => DBAnalyserConfig -> Args blk -> IO (Maybe AnalysisResult) -analyse DBAnalyserConfig{analysis, confLimit, dbDir, selectDB, validation, verbose} args = +analyse DBAnalyserConfig{analysis, confLimit, dbDir, selectDB, validation, verbose, ldbBackend} args = withRegistry $ \registry -> do lock <- newMVar () chainDBTracer <- mkTracer lock verbose analysisTracer <- mkTracer lock True ProtocolInfo { pInfoInitLedger = genesisLedger, pInfoConfig = cfg } <- mkProtocolInfo args - let chunkInfo = Node.nodeImmutableDbChunkInfo (configStorage cfg) - chainDbArgs = - maybeValidateAll - $ updateTracer chainDBTracer - $ completeChainDbArgs - registry - cfg - genesisLedger - chunkInfo - (const True) - (Node.stdMkChainDbHasFS dbDir) - (Node.stdMkChainDbHasFS dbDir) - $ defaultArgs + let shfs = Node.stdMkChainDbHasFS dbDir + chunkInfo = Node.nodeImmutableDbChunkInfo (configStorage cfg) + flavargs = case ldbBackend of + V1InMem -> LedgerDB.LedgerDbFlavorArgsV1 + (LedgerDB.V1.V1Args LedgerDB.V1.DisableFlushing LedgerDB.V1.DisableQuerySize LedgerDB.V1.InMemoryBackingStoreArgs) + V1LMDB -> LedgerDB.LedgerDbFlavorArgsV1 + (LedgerDB.V1.V1Args LedgerDB.V1.DisableFlushing LedgerDB.V1.DisableQuerySize (LedgerDB.V1.LMDBBackingStoreArgs (BS.LiveLMDBFS (shfs (ChainDB.RelativeMountPoint "lmdb"))) defaultLMDBLimits Dict.Dict)) + V2InMem -> LedgerDB.LedgerDbFlavorArgsV2 (LedgerDB.V2.V2Args LedgerDB.V2.InMemoryHandleArgs) + args' = + ChainDB.completeChainDbArgs + registry + cfg + genesisLedger + chunkInfo + (const True) + shfs + shfs + flavargs $ + ChainDB.defaultArgs + chainDbArgs = maybeValidateAll $ ChainDB.updateTracer chainDBTracer args' immutableDbArgs = ChainDB.cdbImmDbArgs chainDbArgs - ledgerDbFS = lgrHasFS $ ChainDB.cdbLgrDbArgs chainDbArgs + ldbArgs = ChainDB.cdbLgrDbArgs args' withImmutableDB immutableDbArgs $ \(immutableDB, internal) -> do SomeAnalysis (Proxy :: Proxy startFrom) ana <- pure $ runAnalysis analysis @@ -83,35 +134,19 @@ analyse DBAnalyserConfig{analysis, confLimit, dbDir, selectDB, validation, verbo Just hash -> 
pure $ BlockPoint slot hash Nothing -> fail $ "No block with given slot in the ImmutableDB: " <> show slot SStartFromLedgerState -> do - -- TODO we need to check if the snapshot exists. If not, print an - -- error and ask the user if she wanted to create a snapshot first and - -- how to do it. - initLedgerErr <- runExceptT $ case startSlot of - Origin -> pure genesisLedger - NotOrigin (SlotNo slot) -> readSnapshot - ledgerDbFS - (decodeDiskExtLedgerState $ configCodec cfg) - decode - (DiskSnapshot slot (Just "db-analyser")) - -- TODO @readSnapshot@ has type @ExceptT ReadIncrementalErr m - -- (ExtLedgerState blk)@ but it also throws exceptions! This makes - -- error handling more challenging than it ought to be. Maybe we - -- can enrich the error that @readSnapthot@ return, so that it can - -- contain the @HasFS@ errors as well. - initLedger <- either (error . show) pure initLedgerErr + (ledgerDB, intLedgerDB) <- openLedgerDB ldbArgs -- This marker divides the "loading" phase of the program, where the -- system is principally occupied with reading snapshot data from -- disk, from the "processing" phase, where we are streaming blocks -- and running the ledger processing on them. Debug.traceMarkerIO "SNAPSHOT_LOADED" - pure $ FromLedgerState initLedger + pure $ FromLedgerState ledgerDB intLedgerDB result <- ana AnalysisEnv { cfg , startFrom , db = immutableDB , registry - , ledgerDbFS = ledgerDbFS , limit = confLimit , tracer = analysisTracer } @@ -138,7 +173,7 @@ analyse DBAnalyserConfig{analysis, confLimit, dbDir, selectDB, validation, verbo withLock = bracket_ (takeMVar lock) (putMVar lock ()) maybeValidateAll = case (analysis, validation) of - (_, Just ValidateAllBlocks) -> ensureValidateAll + (_, Just ValidateAllBlocks) -> ChainDB.ensureValidateAll (_, Just MinimumBlockValidation) -> id - (OnlyValidation, _ ) -> ensureValidateAll + (OnlyValidation, _ ) -> ChainDB.ensureValidateAll _ -> id diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Types.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Types.hs index d0ba0cddfe..e83a926182 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Types.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Types.hs @@ -15,6 +15,7 @@ data DBAnalyserConfig = DBAnalyserConfig { , validation :: Maybe ValidateBlocks , analysis :: AnalysisName , confLimit :: Limit + , ldbBackend :: LedgerDBBackend } data AnalysisName = @@ -47,6 +48,8 @@ newtype NumberOfBlocks = NumberOfBlocks { unNumberOfBlocks :: Word64 } data Limit = Limit Int | Unlimited +data LedgerDBBackend = V1InMem | V1LMDB | V2InMem + -- | The extent of the ChainDB on-disk files validation. This is completely -- unrelated to validation of the ledger rules. 
data ValidateBlocks = ValidateAllBlocks | MinimumBlockValidation diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Forging.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Forging.hs index f49771ea4a..52881aad31 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Forging.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Forging.hs @@ -15,6 +15,7 @@ import Control.Monad (when) import Control.Monad.Except (runExcept) import Control.Monad.IO.Class (liftIO) import Control.Monad.Trans.Except (ExceptT, runExceptT, throwE) +import Control.ResourceRegistry import Control.Tracer as Trace (nullTracer) import Data.Either (isRight) import Data.Maybe (isJust) @@ -33,16 +34,21 @@ import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.SupportsMempool (GenTx) import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils (forgetLedgerTables) import Ouroboros.Consensus.Protocol.Abstract (ChainDepState, tickChainDepState) import Ouroboros.Consensus.Storage.ChainDB.API as ChainDB (AddBlockResult (..), ChainDB, addBlockAsync, - blockProcessed, getCurrentChain, getPastLedger) + blockProcessed, getCurrentChain, getPastLedger, + getReadOnlyForkerAtPoint) import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as InvalidBlockPunishment (noPunishment) +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Ticked import Ouroboros.Consensus.Util.IOLike (atomically) import Ouroboros.Network.AnchoredFragment as AF (Anchor (..), AnchoredFragment, AnchoredSeq (..), headPoint) +import Ouroboros.Network.Protocol.LocalStateQuery.Type data ForgeState = @@ -57,13 +63,13 @@ initialForgeState :: ForgeState initialForgeState = ForgeState 0 0 0 0 -- | An action to generate transactions for a given block -type GenTxs blk = SlotNo -> TickedLedgerState blk -> IO [Validated (GenTx blk)] +type GenTxs blk mk = SlotNo -> (IO (ReadOnlyForker IO (ExtLedgerState blk) blk)) -> TickedLedgerState blk DiffMK -> IO [Validated (GenTx blk)] -- DUPLICATE: runForge mirrors forging loop from ouroboros-consensus/src/Ouroboros/Consensus/NodeKernel.hs -- For an extensive commentary of the forging loop, see there. runForge :: - forall blk. + forall blk mk. 
( LedgerSupportsProtocol blk ) => EpochSize -> SlotNo @@ -71,7 +77,7 @@ runForge :: -> ChainDB IO blk -> [BlockForging IO blk] -> TopLevelConfig blk - -> GenTxs blk + -> GenTxs blk mk -> IO ForgeResult runForge epochSize_ nextSlot opts chainDB blockForging cfg genTxs = do putStrLn $ "--> epoch size: " ++ show epochSize_ @@ -157,7 +163,7 @@ runForge epochSize_ nextSlot opts chainDB blockForging cfg genTxs = do _ -> exitEarly' "NoLeader" -- Tick the ledger state for the 'SlotNo' we're producing a block for - let tickedLedgerState :: Ticked (LedgerState blk) + let tickedLedgerState :: Ticked1 (LedgerState blk) DiffMK tickedLedgerState = applyChainTick (configLedger cfg) @@ -165,7 +171,13 @@ runForge epochSize_ nextSlot opts chainDB blockForging cfg genTxs = do (ledgerState unticked) -- Let the caller generate transactions - txs <- lift $ genTxs currentSlot tickedLedgerState + txs <- lift $ withRegistry $ \reg -> + genTxs + currentSlot + ( either (error "Impossible: we are forging on top of a block that the ChainDB cannot create forkers on!") id + <$> getReadOnlyForkerAtPoint chainDB reg (SpecificPoint bcPrevPoint) + ) + tickedLedgerState -- Actually produce the block newBlock <- lift $ @@ -173,7 +185,7 @@ runForge epochSize_ nextSlot opts chainDB blockForging cfg genTxs = do cfg bcBlockNo currentSlot - tickedLedgerState + (forgetLedgerTables tickedLedgerState) txs proof diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Run.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Run.hs index cc88fbd694..0f3fae4a5f 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Run.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBSynthesizer/Run.hs @@ -17,7 +17,7 @@ import Control.Monad.Trans.Except (ExceptT) import Control.Monad.Trans.Except.Extra (firstExceptT, handleIOExceptT, hoistEither, runExceptT) import Control.ResourceRegistry -import Control.Tracer (nullTracer) +import Control.Tracer import Data.Aeson as Aeson (FromJSON, Result (..), Value, eitherDecodeFileStrict', eitherDecodeStrict', fromJSON) import Data.Bool (bool) @@ -33,8 +33,10 @@ import Ouroboros.Consensus.Node.ProtocolInfo (ProtocolInfo (..)) import Ouroboros.Consensus.Shelley.Node (ShelleyGenesis (..), validateGenesis) import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB (getTipPoint) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDB (withDB) +import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args as LedgerDB.V1 import Ouroboros.Consensus.Util.IOLike (atomically) import Ouroboros.Network.Block import Ouroboros.Network.Point (WithOrigin (..)) @@ -113,16 +115,19 @@ eitherParseJson v = case fromJSON v of synthesize :: ( TopLevelConfig (CardanoBlock StandardCrypto) - -> GenTxs (CardanoBlock StandardCrypto) + -> GenTxs (CardanoBlock StandardCrypto) mk ) -> DBSynthesizerConfig -> (CardanoProtocolParams StandardCrypto) -> IO ForgeResult synthesize genTxs DBSynthesizerConfig{confOptions, confShelleyGenesis, confDbDir} runP = withRegistry $ \registry -> do + let epochSize = sgEpochLength confShelleyGenesis chunkInfo = Node.nodeImmutableDbChunkInfo (configStorage pInfoConfig) + bss = LedgerDB.V1.V1Args LedgerDB.V1.DisableFlushing LedgerDB.V1.DisableQuerySize $ 
InMemoryBackingStoreArgs + flavargs = LedgerDB.LedgerDbFlavorArgsV1 bss dbArgs = ChainDB.completeChainDbArgs registry @@ -132,7 +137,8 @@ synthesize genTxs DBSynthesizerConfig{confOptions, confShelleyGenesis, confDbDir (const True) (Node.stdMkChainDbHasFS confDbDir) (Node.stdMkChainDbHasFS confDbDir) - $ ChainDB.defaultArgs + flavargs $ + ChainDB.defaultArgs forgers <- blockForging let fCount = length forgers diff --git a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBTruncater/Run.hs b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBTruncater/Run.hs index 236c5cc5e8..40fabdcb74 100644 --- a/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBTruncater/Run.hs +++ b/ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBTruncater/Run.hs @@ -27,6 +27,7 @@ import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB, Iterator, IteratorResult (..), Tip (..)) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB import Ouroboros.Consensus.Storage.ImmutableDB.Impl +import Ouroboros.Consensus.Util.Args import Ouroboros.Consensus.Util.IOLike import Prelude hiding (truncate) import System.IO @@ -46,7 +47,7 @@ truncate DBTruncaterConfig{ dbDir, truncateAfter, verbose } args = do let fs = Node.stdMkChainDbHasFS dbDir (RelativeMountPoint "immutable") chunkInfo = Node.nodeImmutableDbChunkInfo (configStorage config) - immutableDBArgs :: ImmutableDbArgs Identity IO block + immutableDBArgs :: Complete ImmutableDbArgs IO block immutableDBArgs = (ImmutableDB.defaultArgs @IO) { immTracer = immutableDBTracer diff --git a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Examples.hs b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Examples.hs index d503c33ee2..0adee2f289 100644 --- a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Examples.hs +++ b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Examples.hs @@ -20,15 +20,24 @@ module Test.Consensus.Shelley.Examples ( ) where import qualified Cardano.Ledger.Block as SL +import qualified Cardano.Ledger.Core as LC import Cardano.Ledger.Crypto (Crypto) +import Cardano.Ledger.Era (getAllTxInputs) +import Cardano.Ledger.TxIn import qualified Cardano.Protocol.TPraos.BHeader as SL import Data.Coerce (coerce) +import Data.Foldable (toList) import Data.List.NonEmpty (NonEmpty ((:|))) +import qualified Data.Map as Map import qualified Data.Set as Set +import Lens.Micro import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.Abstract (TranslateProto, translateChainDepState) import Ouroboros.Consensus.Protocol.Praos (Praos) @@ -64,7 +73,6 @@ import Test.Util.Serialisation.Examples (Examples (..), labelled, unlabelled) import Test.Util.Serialisation.SomeResult (SomeResult (..)) - {------------------------------------------------------------------------------- Examples -------------------------------------------------------------------------------} @@ -72,6 +80,30 @@ import Test.Util.Serialisation.SomeResult (SomeResult (..)) codecConfig :: CodecConfig StandardShelleyBlock codecConfig = ShelleyCodecConfig +mkLedgerTables :: forall proto era. 
+ ShelleyCompatible proto era + => LC.Tx era + -> LedgerTables (LedgerState (ShelleyBlock proto era)) ValuesMK +mkLedgerTables tx = + LedgerTables + $ ValuesMK + $ Map.fromList + $ zip exampleTxIns exampleTxOuts + where + exampleTxIns :: [TxIn (EraCrypto era)] + exampleTxIns = case toList $ getAllTxInputs (tx ^. LC.bodyTxL) of + [] -> error "No transaction inputs were provided to construct the ledger tables" + -- We require at least one transaction input (and one + -- transaction output) in the example provided by + -- cardano-ledger to make sure that we test the serialization + -- of ledger tables with at least one non-trivial example. + xs -> xs + + exampleTxOuts :: [LC.TxOut era] + exampleTxOuts = case toList (tx ^. (LC.bodyTxL . LC.outputsTxBodyL)) of + [] -> error "No transaction outputs were provided to construct the ledger tables" + xs -> xs + fromShelleyLedgerExamples :: ShelleyCompatible (TPraos (EraCrypto era)) era => ShelleyLedgerExamples era @@ -95,6 +127,7 @@ fromShelleyLedgerExamples ShelleyLedgerExamples { , exampleChainDepState = unlabelled chainDepState , exampleExtLedgerState = unlabelled extLedgerState , exampleSlotNo = unlabelled slotNo + , exampleLedgerTables = unlabelled $ mkLedgerTables sleTx } where blk = mkShelleyBlock sleBlock @@ -105,14 +138,14 @@ fromShelleyLedgerExamples ShelleyLedgerExamples { serialisedHeader = SerialisedHeaderFromDepPair $ GenDepPair (NestedCtxt CtxtShelley) (Serialised "
") queries = labelled [ - ("GetLedgerTip", SomeSecond GetLedgerTip) - , ("GetEpochNo", SomeSecond GetEpochNo) - , ("GetCurrentPParams", SomeSecond GetCurrentPParams) - , ("GetProposedPParamsUpdates", SomeSecond GetProposedPParamsUpdates) - , ("GetStakeDistribution", SomeSecond GetStakeDistribution) - , ("GetNonMyopicMemberRewards", SomeSecond $ GetNonMyopicMemberRewards sleRewardsCredentials) - , ("GetGenesisConfig", SomeSecond GetGenesisConfig) - , ("GetBigLedgerPeerSnapshot", SomeSecond GetBigLedgerPeerSnapshot) + ("GetLedgerTip", SomeBlockQuery GetLedgerTip) + , ("GetEpochNo", SomeBlockQuery GetEpochNo) + , ("GetCurrentPParams", SomeBlockQuery GetCurrentPParams) + , ("GetProposedPParamsUpdates", SomeBlockQuery GetProposedPParamsUpdates) + , ("GetStakeDistribution", SomeBlockQuery GetStakeDistribution) + , ("GetNonMyopicMemberRewards", SomeBlockQuery $ GetNonMyopicMemberRewards sleRewardsCredentials) + , ("GetGenesisConfig", SomeBlockQuery GetGenesisConfig) + , ("GetBigLedgerPeerSnapshot", SomeBlockQuery GetBigLedgerPeerSnapshot) ] results = labelled [ ("LedgerTip", SomeResult GetLedgerTip (blockPoint blk)) @@ -142,12 +175,14 @@ fromShelleyLedgerExamples ShelleyLedgerExamples { } , shelleyLedgerState = sleNewEpochState , shelleyLedgerTransition = ShelleyTransitionInfo {shelleyAfterVoting = 0} + , shelleyLedgerTables = LedgerTables EmptyMK } chainDepState = TPraosState (NotOrigin 1) sleChainDepState extLedgerState = ExtLedgerState ledgerState (genesisHeaderState chainDepState) + -- | TODO Factor this out into something nicer. fromShelleyLedgerExamplesPraos :: forall era. @@ -172,6 +207,7 @@ fromShelleyLedgerExamplesPraos ShelleyLedgerExamples { , exampleResult = results , exampleAnnTip = unlabelled annTip , exampleLedgerState = unlabelled ledgerState + , exampleLedgerTables = unlabelled $ mkLedgerTables sleTx , exampleChainDepState = unlabelled chainDepState , exampleExtLedgerState = unlabelled extLedgerState , exampleSlotNo = unlabelled slotNo @@ -180,7 +216,7 @@ fromShelleyLedgerExamplesPraos ShelleyLedgerExamples { blk = mkShelleyBlock $ let SL.Block hdr1 bdy = sleBlock in SL.Block (translateHeader hdr1) bdy - translateHeader :: Crypto c => SL.BHeader c -> Praos.Header c + translateHeader :: Cardano.Ledger.Crypto.Crypto c => SL.BHeader c -> Praos.Header c translateHeader (SL.BHeader bhBody bhSig) = Praos.Header hBody hSig where @@ -204,13 +240,13 @@ fromShelleyLedgerExamplesPraos ShelleyLedgerExamples { serialisedHeader = SerialisedHeaderFromDepPair $ GenDepPair (NestedCtxt CtxtShelley) (Serialised "
") queries = labelled [ - ("GetLedgerTip", SomeSecond GetLedgerTip) - , ("GetEpochNo", SomeSecond GetEpochNo) - , ("GetCurrentPParams", SomeSecond GetCurrentPParams) - , ("GetProposedPParamsUpdates", SomeSecond GetProposedPParamsUpdates) - , ("GetStakeDistribution", SomeSecond GetStakeDistribution) - , ("GetNonMyopicMemberRewards", SomeSecond $ GetNonMyopicMemberRewards sleRewardsCredentials) - , ("GetGenesisConfig", SomeSecond GetGenesisConfig) + ("GetLedgerTip", SomeBlockQuery GetLedgerTip) + , ("GetEpochNo", SomeBlockQuery GetEpochNo) + , ("GetCurrentPParams", SomeBlockQuery GetCurrentPParams) + , ("GetProposedPParamsUpdates", SomeBlockQuery GetProposedPParamsUpdates) + , ("GetStakeDistribution", SomeBlockQuery GetStakeDistribution) + , ("GetNonMyopicMemberRewards", SomeBlockQuery $ GetNonMyopicMemberRewards sleRewardsCredentials) + , ("GetGenesisConfig", SomeBlockQuery GetGenesisConfig) ] results = labelled [ ("LedgerTip", SomeResult GetLedgerTip (blockPoint blk)) @@ -235,6 +271,7 @@ fromShelleyLedgerExamplesPraos ShelleyLedgerExamples { } , shelleyLedgerState = sleNewEpochState , shelleyLedgerTransition = ShelleyTransitionInfo {shelleyAfterVoting = 0} + , shelleyLedgerTables = emptyLedgerTables } chainDepState = translateChainDepState (Proxy @(TPraos (EraCrypto era), Praos (EraCrypto era))) $ TPraosState (NotOrigin 1) sleChainDepState @@ -242,8 +279,6 @@ fromShelleyLedgerExamplesPraos ShelleyLedgerExamples { ledgerState (genesisHeaderState chainDepState) - - examplesShelley :: Examples StandardShelleyBlock examplesShelley = fromShelleyLedgerExamples ledgerExamplesShelley diff --git a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Generators.hs b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Generators.hs index d71bf21450..1b4e87d54e 100644 --- a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Generators.hs +++ b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/Consensus/Shelley/Generators.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} @@ -131,19 +132,19 @@ instance CanMock proto era => Arbitrary (GenTx (ShelleyBlock proto era)) where instance CanMock proto era => Arbitrary (GenTxId (ShelleyBlock proto era)) where arbitrary = ShelleyTxId <$> arbitrary -instance CanMock proto era => Arbitrary (SomeSecond BlockQuery (ShelleyBlock proto era)) where +instance CanMock proto era => Arbitrary (SomeBlockQuery (BlockQuery (ShelleyBlock proto era))) where arbitrary = oneof - [ pure $ SomeSecond GetLedgerTip - , pure $ SomeSecond GetEpochNo - , SomeSecond . GetNonMyopicMemberRewards <$> arbitrary - , pure $ SomeSecond GetCurrentPParams - , pure $ SomeSecond GetProposedPParamsUpdates - , pure $ SomeSecond GetStakeDistribution - , pure $ SomeSecond DebugEpochState - , (\(SomeSecond q) -> SomeSecond (GetCBOR q)) <$> arbitrary - , SomeSecond . GetFilteredDelegationsAndRewardAccounts <$> arbitrary - , pure $ SomeSecond GetGenesisConfig - , pure $ SomeSecond DebugNewEpochState + [ pure $ SomeBlockQuery GetLedgerTip + , pure $ SomeBlockQuery GetEpochNo + , SomeBlockQuery . GetNonMyopicMemberRewards <$> arbitrary + , pure $ SomeBlockQuery GetCurrentPParams + , pure $ SomeBlockQuery GetProposedPParamsUpdates + , pure $ SomeBlockQuery GetStakeDistribution + , pure $ SomeBlockQuery DebugEpochState + , (\(SomeBlockQuery q) -> SomeBlockQuery (GetCBOR q)) <$> arbitrary + , SomeBlockQuery . 
GetFilteredDelegationsAndRewardAccounts <$> arbitrary + , pure $ SomeBlockQuery GetGenesisConfig + , pure $ SomeBlockQuery DebugNewEpochState ] instance CanMock proto era => Arbitrary (SomeResult (ShelleyBlock proto era)) where @@ -186,11 +187,21 @@ instance CanMock proto era=> Arbitrary (ShelleyTip proto era) where instance Arbitrary ShelleyTransition where arbitrary = ShelleyTransitionInfo <$> arbitrary -instance CanMock proto era => Arbitrary (LedgerState (ShelleyBlock proto era)) where +instance CanMock proto era + => Arbitrary (LedgerState (ShelleyBlock proto era) EmptyMK) where + arbitrary = ShelleyLedgerState + <$> arbitrary + <*> arbitrary + <*> arbitrary + <*> pure (LedgerTables EmptyMK) + +instance CanMock proto era + => Arbitrary (LedgerState (ShelleyBlock proto era) ValuesMK) where arbitrary = ShelleyLedgerState <$> arbitrary <*> arbitrary <*> arbitrary + <*> (LedgerTables . ValuesMK <$> arbitrary) instance CanMock proto era => Arbitrary (AnnTip (ShelleyBlock proto era)) where arbitrary = AnnTip @@ -224,8 +235,8 @@ instance PraosCrypto c => Arbitrary (SL.ChainDepState c) where -- make sure to not generate those queries in combination with -- 'ShelleyNodeToClientVersion1'. instance CanMock proto era - => Arbitrary (WithVersion ShelleyNodeToClientVersion (SomeSecond BlockQuery (ShelleyBlock proto era))) where + => Arbitrary (WithVersion ShelleyNodeToClientVersion (SomeBlockQuery (BlockQuery (ShelleyBlock proto era)))) where arbitrary = do - query@(SomeSecond q) <- arbitrary + query@(SomeBlockQuery q) <- arbitrary version <- arbitrary `suchThat` querySupportedVersion q return $ WithVersion version query diff --git a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/ThreadNet/TxGen/Shelley.hs b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/ThreadNet/TxGen/Shelley.hs index 732c8726dc..739cd81369 100644 --- a/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/ThreadNet/TxGen/Shelley.hs +++ b/ouroboros-consensus-cardano/src/unstable-shelley-testlib/Test/ThreadNet/TxGen/Shelley.hs @@ -22,6 +22,7 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.TPraos (TPraos) import Ouroboros.Consensus.Shelley.HFEras () import Ouroboros.Consensus.Shelley.Ledger @@ -57,12 +58,12 @@ instance HashAlgorithm h => TxGen (ShelleyBlock (TPraos (MockCrypto h)) (MockShe -- When fixed, remove the True case keeping the else case below to re-enable -- the transaction generator.
- | otherwise = - if True - then pure [] - else do - n <- choose (0, 20) - go [] n $ applyChainTick lcfg curSlotNo lst + | otherwise = do + n <- choose (0, 20) + go [] n + $ applyDiffs lst + $ applyChainTick lcfg curSlotNo + $ forgetLedgerTables lst where ShelleyTxGenExtra { stgeGenEnv @@ -74,7 +75,7 @@ instance HashAlgorithm h => TxGen (ShelleyBlock (TPraos (MockCrypto h)) (MockShe go :: [GenTx (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h))] -- ^ Accumulator -> Integer -- ^ Number of txs to still produce - -> TickedLedgerState (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)) + -> TickedLedgerState (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)) ValuesMK -> Gen [GenTx (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h))] go acc 0 _ = return (reverse acc) go acc n st = do @@ -84,13 +85,13 @@ instance HashAlgorithm h => TxGen (ShelleyBlock (TPraos (MockCrypto h)) (MockShe Just tx -> case runExcept $ fst <$> applyTx lcfg DoNotIntervene curSlotNo tx st of -- We don't mind generating invalid transactions Left _ -> go (tx:acc) (n - 1) st - Right st' -> go (tx:acc) (n - 1) st' + Right st' -> go (tx:acc) (n - 1) (applyDiffs st st') genTx :: forall h. HashAlgorithm h => TopLevelConfig (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)) -> SlotNo - -> TickedLedgerState (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)) + -> TickedLedgerState (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)) ValuesMK -> Gen.GenEnv (MockShelley h) -> Gen (Maybe (GenTx (ShelleyBlock (TPraos (MockCrypto h)) (MockShelley h)))) genTx _cfg slotNo TickedShelleyLedgerState { tickedShelleyLedgerState } genEnv = diff --git a/ouroboros-consensus-cardano/test/byron-test/Main.hs b/ouroboros-consensus-cardano/test/byron-test/Main.hs index fb28bbd261..752811b26f 100644 --- a/ouroboros-consensus-cardano/test/byron-test/Main.hs +++ b/ouroboros-consensus-cardano/test/byron-test/Main.hs @@ -1,6 +1,7 @@ module Main (main) where import qualified Test.Consensus.Byron.Golden (tests) +import qualified Test.Consensus.Byron.LedgerTables (tests) import qualified Test.Consensus.Byron.Serialisation (tests) import Test.Tasty import qualified Test.ThreadNet.Byron (tests) @@ -15,6 +16,7 @@ tests :: TestTree tests = testGroup "byron" [ Test.Consensus.Byron.Golden.tests + , Test.Consensus.Byron.LedgerTables.tests , Test.Consensus.Byron.Serialisation.tests , Test.ThreadNet.Byron.tests , Test.ThreadNet.DualByron.tests diff --git a/ouroboros-consensus-cardano/test/byron-test/Test/Consensus/Byron/LedgerTables.hs b/ouroboros-consensus-cardano/test/byron-test/Test/Consensus/Byron/LedgerTables.hs new file mode 100644 index 0000000000..8e6c072869 --- /dev/null +++ b/ouroboros-consensus-cardano/test/byron-test/Test/Consensus/Byron/LedgerTables.hs @@ -0,0 +1,15 @@ +{-# LANGUAGE TypeApplications #-} + +module Test.Consensus.Byron.LedgerTables (tests) where + +import Ouroboros.Consensus.Byron.Ledger +import Test.Consensus.Byron.Generators () +import Test.LedgerTables +import Test.Tasty +import Test.Tasty.QuickCheck + +tests :: TestTree +tests = testGroup "LedgerTables" + [ testProperty "Stowable laws" (prop_stowable_laws @ByronBlock) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @ByronBlock) + ] diff --git a/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/Byron.hs b/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/Byron.hs index fb50268583..c8d81a9475 100644 --- a/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/Byron.hs +++ 
b/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/Byron.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} @@ -47,6 +48,7 @@ import Ouroboros.Consensus.Byron.Ledger.Conversions import Ouroboros.Consensus.Byron.Node import Ouroboros.Consensus.Byron.Protocol import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.NodeId @@ -967,7 +969,7 @@ prop_simple_real_pbft_convergence TestSetup finalChains :: [(NodeId, Chain ByronBlock)] finalChains = Map.toList $ nodeOutputFinalChain <$> testOutputNodes testOutput - finalLedgers :: [(NodeId, Byron.LedgerState ByronBlock)] + finalLedgers :: [(NodeId, Byron.LedgerState ByronBlock EmptyMK)] finalLedgers = Map.toList $ nodeOutputFinalLedger <$> testOutputNodes testOutput pvuLabels :: [(NodeId, ProtocolVersionUpdateLabel)] diff --git a/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/DualByron.hs b/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/DualByron.hs index d06d4ef4dc..489654035f 100644 --- a/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/DualByron.hs +++ b/ouroboros-consensus-cardano/test/byron-test/Test/ThreadNet/DualByron.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -34,6 +35,7 @@ import Ouroboros.Consensus.Config import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Dual import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.NodeId import Ouroboros.Consensus.Protocol.PBFT @@ -257,13 +259,16 @@ byronPBftParams ByronSpecGenesis{..} = instance TxGen DualByronBlock where testGenTxs _coreNodeId _numCoreNodes curSlotNo cfg () = \st -> do n <- choose (0, 20) - go [] n $ applyChainTick (configLedger cfg) curSlotNo st + go [] n + $ applyDiffs st + $ applyChainTick (configLedger cfg) curSlotNo + $ forgetLedgerTables st where -- Attempt to produce @n@ transactions -- Stops when the transaction generator cannot produce more txs go :: [GenTx DualByronBlock] -- Accumulator -> Integer -- Number of txs to still produce - -> TickedLedgerState DualByronBlock + -> TickedLedgerState DualByronBlock ValuesMK -> Gen [GenTx DualByronBlock] go acc 0 _ = return (reverse acc) go acc n st = do @@ -274,7 +279,8 @@ instance TxGen DualByronBlock where curSlotNo tx st of - Right (st', _vtx) -> go (tx:acc) (n - 1) st' + Right (st', _vtx) -> + go (tx:acc) (n - 1) (applyDiffs st st') Left _ -> error "testGenTxs: unexpected invalid tx" -- | Generate transaction @@ -284,7 +290,7 @@ instance TxGen DualByronBlock where -- for now. Extending the scope will require integration with the restart/rekey -- infrastructure of the Byron tests. 
genTx :: TopLevelConfig DualByronBlock - -> Ticked (LedgerState DualByronBlock) + -> TickedLedgerState DualByronBlock ValuesMK -> Gen (GenTx DualByronBlock) genTx cfg st = do aux <- sigGen (Rules.ctxtUTXOW cfg') st' diff --git a/ouroboros-consensus-cardano/test/cardano-test/Main.hs b/ouroboros-consensus-cardano/test/cardano-test/Main.hs index 4e67cb2095..3a18855383 100644 --- a/ouroboros-consensus-cardano/test/cardano-test/Main.hs +++ b/ouroboros-consensus-cardano/test/cardano-test/Main.hs @@ -5,9 +5,10 @@ import qualified Test.Consensus.Cardano.ByronCompatibility import qualified Test.Consensus.Cardano.DiffusionPipelining import qualified Test.Consensus.Cardano.Golden import qualified Test.Consensus.Cardano.MiniProtocol.LocalTxSubmission.Server -import qualified Test.Consensus.Cardano.Serialisation +import qualified Test.Consensus.Cardano.Serialisation (tests) import qualified Test.Consensus.Cardano.SupportedNetworkProtocolVersion import qualified Test.Consensus.Cardano.SupportsSanityCheck +import qualified Test.Consensus.Cardano.Translation (tests) import Test.Tasty import qualified Test.ThreadNet.AllegraMary import qualified Test.ThreadNet.Cardano @@ -35,4 +36,5 @@ tests = , Test.ThreadNet.MaryAlonzo.tests , Test.ThreadNet.ShelleyAllegra.tests , Test.Consensus.Cardano.MiniProtocol.LocalTxSubmission.Server.tests + , Test.Consensus.Cardano.Translation.tests ] diff --git a/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/ByronCompatibility.hs b/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/ByronCompatibility.hs index 437670c1f4..f08682349e 100644 --- a/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/ByronCompatibility.hs +++ b/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/ByronCompatibility.hs @@ -7,6 +7,7 @@ {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} @@ -137,11 +138,11 @@ toCardanoCodecConfig codecConfigByron = -- -- Note that ledger state and all other types stored as part of the ledger -- snapshot are __not__ forwards compatible. 
-newtype ByronToCardano = B2C { unB2C :: ByronBlock } deriving (Eq, Show) -newtype instance Header ByronToCardano = HeaderB2C { unHeaderB2C :: Header ByronBlock } deriving (Eq, Show) -newtype instance GenTx ByronToCardano = GenTxB2C { unGenTxB2C :: GenTx ByronBlock } deriving (Eq, Show) -newtype instance TxId (GenTx ByronToCardano) = GenTxIdB2C { unGenTxIdB2C :: GenTxId ByronBlock } deriving (Eq, Show) -newtype instance BlockQuery ByronToCardano a = QueryB2C { unQueryB2C :: BlockQuery ByronBlock a } deriving (Eq, Show) +newtype ByronToCardano = B2C { unB2C :: ByronBlock } deriving (Eq, Show) +newtype instance Header ByronToCardano = HeaderB2C { unHeaderB2C :: Header ByronBlock } deriving (Eq, Show) +newtype instance GenTx ByronToCardano = GenTxB2C { unGenTxB2C :: GenTx ByronBlock } deriving (Eq, Show) +newtype instance TxId (GenTx ByronToCardano) = GenTxIdB2C { unGenTxIdB2C :: GenTxId ByronBlock } deriving (Eq, Show) +newtype instance BlockQuery ByronToCardano fp a = QueryB2C { unQueryB2C :: BlockQuery ByronBlock fp a } deriving (Eq, Show) newtype instance NestedCtxt_ ByronToCardano f a where NestedCtxt_B2C :: NestedCtxt_ ByronBlock f a @@ -174,11 +175,11 @@ instance HasNestedContent Header ByronToCardano where nest (DepPair ctxt a) = HeaderB2C $ nest (DepPair (mapNestedCtxt unNestedCtxt_B2C ctxt) a) -instance ShowQuery (BlockQuery ByronToCardano) where +instance ShowQuery (BlockQuery ByronToCardano fp) where showResult (QueryB2C query) = showResult query -instance SameDepIndex (BlockQuery ByronToCardano) where - sameDepIndex (QueryB2C q1) (QueryB2C q2) = sameDepIndex q1 q2 +instance SameDepIndex2 (BlockQuery ByronToCardano) where + sameDepIndex2 (QueryB2C q1) (QueryB2C q2) = sameDepIndex2 q1 q2 {------------------------------------------------------------------------------ Byron to Cardano: Disk @@ -380,24 +381,25 @@ instance SerialiseNodeToClient ByronToCardano CC.ApplyMempoolPayloadErr where encodeNodeToClient = encodeNodeToClientB2C (Proxy @WrapApplyTxErr) id decodeNodeToClient = decodeNodeToClientB2C (Proxy @WrapApplyTxErr) (\(ApplyTxErrByron err) -> err) -instance SerialiseNodeToClient ByronToCardano (SomeSecond BlockQuery ByronToCardano) where +instance SerialiseNodeToClient ByronToCardano (SomeBlockQuery (BlockQuery ByronToCardano)) where encodeNodeToClient = encodeNodeToClientB2C - (Proxy @(SomeSecond BlockQuery)) - (\(SomeSecond q) -> SomeSecond (unQueryB2C q)) + (Proxy @(SomeBlockQuery :.: BlockQuery)) + (\(SomeBlockQuery q) -> SomeBlockQuery (unQueryB2C q)) decodeNodeToClient = decodeNodeToClientB2C - (Proxy @(SomeSecond BlockQuery)) - (\(SomeSecond (QueryIfCurrentByron q)) -> SomeSecond (QueryB2C q)) + (Proxy @(SomeBlockQuery :.: BlockQuery)) + (\(SomeBlockQuery (QueryIfCurrentByron q)) -> SomeBlockQuery (QueryB2C q)) -instance SerialiseResult ByronToCardano (BlockQuery ByronToCardano) where - encodeResult (CodecConfigB2C ccfg) () (QueryB2C q) r = - encodeResult ccfg byronNodeToClientVersion q r - decodeResult (CodecConfigB2C ccfg) () (QueryB2C (q :: BlockQuery ByronBlock result)) = +instance SerialiseResult' ByronToCardano BlockQuery where + encodeResult' (CodecConfigB2C ccfg) () (QueryB2C q) r = + encodeResult' ccfg byronNodeToClientVersion q r + decodeResult' (CodecConfigB2C ccfg) () (QueryB2C (q :: BlockQuery ByronBlock fp result)) = (\(QueryResultSuccess r) -> r) <$> - decodeResult + decodeResult' (toCardanoCodecConfig ccfg) cardanoNodeToClientVersion (QueryIfCurrentByron q :: CardanoQuery Crypto + fp (CardanoQueryResult Crypto result)) instance 
SerialiseNodeToClientConstraints ByronToCardano @@ -425,8 +427,8 @@ instance Arbitrary (GenTx ByronToCardano) where instance Arbitrary (GenTxId ByronToCardano) where arbitrary = GenTxIdB2C <$> arbitrary -instance Arbitrary (SomeSecond BlockQuery ByronToCardano) where - arbitrary = (\(SomeSecond q) -> SomeSecond (QueryB2C q)) <$> arbitrary +instance Arbitrary (SomeBlockQuery (BlockQuery ByronToCardano)) where + arbitrary = (\(SomeBlockQuery q) -> SomeBlockQuery (QueryB2C q)) <$> arbitrary instance Arbitrary (SomeResult ByronToCardano) where arbitrary = (\(SomeResult q r) -> SomeResult (QueryB2C q) r) <$> arbitrary @@ -443,11 +445,11 @@ instance Arbitrary (SomeResult ByronToCardano) where -- -- Note that ledger state and all other types stored as part of the ledger -- snapshot are __not__ forwards compatible. -newtype CardanoToByron = C2B { unC2B :: ByronBlock } deriving (Eq, Show) -newtype instance Header CardanoToByron = HeaderC2B { unHeaderC2B :: Header ByronBlock } deriving (Eq, Show) -newtype instance GenTx CardanoToByron = GenTxC2B { unGenTxC2B :: GenTx ByronBlock } deriving (Eq, Show) -newtype instance TxId (GenTx CardanoToByron) = GenTxIdC2B { unGenTxIdC2B :: GenTxId ByronBlock } deriving (Eq, Show) -newtype instance BlockQuery CardanoToByron a = QueryC2B { unQueryC2B :: BlockQuery ByronBlock a } deriving (Eq, Show) +newtype CardanoToByron = C2B { unC2B :: ByronBlock } deriving (Eq, Show) +newtype instance Header CardanoToByron = HeaderC2B { unHeaderC2B :: Header ByronBlock } deriving (Eq, Show) +newtype instance GenTx CardanoToByron = GenTxC2B { unGenTxC2B :: GenTx ByronBlock } deriving (Eq, Show) +newtype instance TxId (GenTx CardanoToByron) = GenTxIdC2B { unGenTxIdC2B :: GenTxId ByronBlock } deriving (Eq, Show) +newtype instance BlockQuery CardanoToByron fp a = QueryC2B { unQueryC2B :: BlockQuery ByronBlock fp a } deriving (Eq, Show) newtype instance NestedCtxt_ CardanoToByron f a where NestedCtxt_C2B :: NestedCtxt_ ByronBlock f a @@ -480,11 +482,11 @@ instance HasNestedContent Header CardanoToByron where nest (DepPair ctxt a) = HeaderC2B $ nest (DepPair (mapNestedCtxt unNestedCtxt_C2B ctxt) a) -instance ShowQuery (BlockQuery CardanoToByron) where +instance ShowQuery (BlockQuery CardanoToByron fp) where showResult (QueryC2B query) = showResult query -instance SameDepIndex (BlockQuery CardanoToByron) where - sameDepIndex (QueryC2B q1) (QueryC2B q2) = sameDepIndex q1 q2 +instance SameDepIndex2 (BlockQuery CardanoToByron) where + sameDepIndex2 (QueryC2B q1) (QueryC2B q2) = sameDepIndex2 q1 q2 {------------------------------------------------------------------------------ Cardano to Byron: Disk @@ -670,25 +672,25 @@ instance SerialiseNodeToClient CardanoToByron CC.ApplyMempoolPayloadErr where encodeNodeToClient = encodeNodeToClientC2B (Proxy @WrapApplyTxErr) ApplyTxErrByron decodeNodeToClient = decodeNodeToClientC2B (Proxy @WrapApplyTxErr) id -instance SerialiseNodeToClient CardanoToByron (SomeSecond BlockQuery CardanoToByron) where +instance SerialiseNodeToClient CardanoToByron (SomeBlockQuery (BlockQuery CardanoToByron)) where encodeNodeToClient = encodeNodeToClientC2B - (Proxy @(SomeSecond BlockQuery)) - (\(SomeSecond q) -> SomeSecond (QueryIfCurrentByron (unQueryC2B q))) + (Proxy @(SomeBlockQuery :.: BlockQuery)) + (\(SomeBlockQuery q) -> SomeBlockQuery (QueryIfCurrentByron (unQueryC2B q))) decodeNodeToClient = decodeNodeToClientC2B - (Proxy @(SomeSecond BlockQuery)) - (\(SomeSecond q) -> SomeSecond (QueryC2B q)) + (Proxy @(SomeBlockQuery :.: BlockQuery)) + (\(SomeBlockQuery q) -> 
SomeBlockQuery (QueryC2B q)) -instance SerialiseResult CardanoToByron (BlockQuery CardanoToByron) where - encodeResult (CodecConfigC2B ccfg) () (QueryC2B q) (r :: result) = - encodeResult +instance SerialiseResult' CardanoToByron BlockQuery where + encodeResult' (CodecConfigC2B ccfg) () (QueryC2B q) (r :: result) = + encodeResult' (toCardanoCodecConfig ccfg) cardanoNodeToClientVersion (QueryIfCurrentByron q) (QueryResultSuccess r :: CardanoQueryResult Crypto result) - decodeResult (CodecConfigC2B ccfg) () (QueryC2B q) = - decodeResult ccfg byronNodeToClientVersion q + decodeResult' (CodecConfigC2B ccfg) () (QueryC2B q) = + decodeResult' ccfg byronNodeToClientVersion q instance SerialiseNodeToClientConstraints CardanoToByron @@ -715,8 +717,8 @@ instance Arbitrary (GenTx CardanoToByron) where instance Arbitrary (GenTxId CardanoToByron) where arbitrary = GenTxIdC2B <$> arbitrary -instance Arbitrary (SomeSecond BlockQuery CardanoToByron) where - arbitrary = (\(SomeSecond q) -> SomeSecond (QueryC2B q)) <$> arbitrary +instance Arbitrary (SomeBlockQuery (BlockQuery CardanoToByron)) where + arbitrary = (\(SomeBlockQuery q) -> SomeBlockQuery (QueryC2B q)) <$> arbitrary instance Arbitrary (SomeResult CardanoToByron) where arbitrary = (\(SomeResult q r) -> SomeResult (QueryC2B q) r) <$> arbitrary diff --git a/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/Translation.hs b/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/Translation.hs new file mode 100644 index 0000000000..7245314436 --- /dev/null +++ b/ouroboros-consensus-cardano/test/cardano-test/Test/Consensus/Cardano/Translation.hs @@ -0,0 +1,396 @@ +{-# LANGUAGE BlockArguments #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} +{-# OPTIONS_GHC -Wno-orphans #-} +module Test.Consensus.Cardano.Translation (tests) where + +import qualified Cardano.Chain.Block as Byron +import qualified Cardano.Chain.UTxO as Byron +import Cardano.Ledger.Alonzo () +import Cardano.Ledger.BaseTypes (Network (Testnet), TxIx (..)) +import qualified Cardano.Ledger.Core as Core +import qualified Cardano.Ledger.Crypto as Crypto +import qualified Cardano.Ledger.Genesis as Genesis +import Cardano.Ledger.Shelley.API + (NewEpochState (stashedAVVMAddresses), ShelleyGenesis (..), + ShelleyGenesisStaking (..), TxIn (..), + translateCompactTxOutByronToShelley, + translateTxIdByronToShelley) +import Cardano.Ledger.Shelley.LedgerState (esLState, lsUTxOState, + nesEs, utxosUtxo) +import Cardano.Ledger.Shelley.PParams (emptyPParams) +import Cardano.Ledger.Shelley.Translation +import Cardano.Ledger.Shelley.UTxO (UTxO (..)) +import Cardano.Slotting.EpochInfo (fixedEpochInfo) +import Cardano.Slotting.Slot (EpochNo (..)) +import qualified Data.ListMap as ListMap +import qualified Data.Map.Strict as Map +import Data.SOP.BasicFunctors +import Data.SOP.Functors +import Data.SOP.InPairs (RequiringBoth (..), provideBoth) +import Ouroboros.Consensus.BlockchainTime.WallClock.Types + (slotLengthFromSec) +import Ouroboros.Consensus.Byron.Ledger (ByronBlock, byronLedgerState) +import Ouroboros.Consensus.Cardano.Block (CardanoEras) +import Ouroboros.Consensus.Cardano.CanHardFork +import Ouroboros.Consensus.Cardano.CanHardFork () +import 
Ouroboros.Consensus.HardFork.Combinator (InPairs (..), + hardForkEraTranslation, translateLedgerState) +import Ouroboros.Consensus.HardFork.Combinator.State.Types + (TranslateLedgerState (TranslateLedgerState, translateLedgerStateWith)) +import Ouroboros.Consensus.Ledger.Basics (LedgerCfg, LedgerConfig, + LedgerState) +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Ledger.Tables.Diff (Diff) +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Protocol.Praos +import Ouroboros.Consensus.Protocol.TPraos (TPraos) +import Ouroboros.Consensus.Shelley.Eras +import Ouroboros.Consensus.Shelley.HFEras () +import Ouroboros.Consensus.Shelley.Ledger (ShelleyBlock, + ShelleyLedgerConfig, mkShelleyLedgerConfig, + shelleyLedgerState, shelleyLedgerTables) +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +import Ouroboros.Consensus.TypeFamilyWrappers +import Ouroboros.Consensus.Util (dimap) +import Test.Cardano.Ledger.Alonzo.Serialisation.Generators () +import Test.Cardano.Ledger.Babbage.Serialisation.Generators () +import Test.Cardano.Ledger.Conway.Arbitrary () +import Test.Cardano.Ledger.Shelley.Utils (unsafeBoundRational) +import Test.Consensus.Byron.Generators (genByronLedgerConfig, + genByronLedgerState) +import Test.Consensus.Cardano.MockCrypto (MockCryptoCompatByron) +import Test.Consensus.Shelley.Generators () +import Test.Consensus.Shelley.MockCrypto +import Test.QuickCheck +import Test.Tasty +import Test.Tasty.QuickCheck +import Test.Util.Time (dawnOfTime) + +-- Definitions to make the signatures a bit less unwieldy +type Crypto = MockCryptoCompatByron +type Proto = TPraos Crypto + +tests :: TestTree +tests = testGroup "UpdateTablesOnEraTransition" + [ testTablesTranslation "Byron to Shelley" + byronToShelleyLedgerStateTranslation + byronUtxosAreInsertsInShelleyUtxoDiff + (\st -> cover 50 ( nonEmptyUtxosByron st) "UTxO set is not empty" + -- The Byron ledger generators are very + -- unlikely to generate an empty UTxO, but we + -- want to test with the empty UTxO as well. + -- See 'Test.Cardano.Chain.UTxO.Gen.genUTxO' + -- and the @Arbitrary + -- 'Cardano.Chain.UTxO.UTxO'@ instance in + -- "Test.Consensus.Byron.Generators". + . 
cover 0.1 (not $ nonEmptyUtxosByron st) "UTxO set is empty" + ) + , testTablesTranslation "Shelley to Allegra" + shelleyToAllegraLedgerStateTranslation + shelleyAvvmAddressesAreDeletesInUtxoDiff + (\st -> cover 50 (nonEmptyAvvmAddresses st) "AVVM set is not empty") + , testTablesTranslation "Allegra to Mary" + allegraToMaryLedgerStateTranslation + utxoTablesAreEmpty + (\st -> cover 50 (nonEmptyUtxosShelley st) "UTxO set is not empty") + , testTablesTranslation "Mary to Alonzo" + maryToAlonzoLedgerStateTranslation + utxoTablesAreEmpty + (\st -> cover 50 (nonEmptyUtxosShelley st) "UTxO set is not empty") + , testTablesTranslation "Alonzo to Babbage" + alonzoToBabbageLedgerStateTranslation + utxoTablesAreEmpty + (\st -> cover 50 (nonEmptyUtxosShelley st) "UTxO set is not empty") + , testTablesTranslation "Babbage to Conway" + babbageToConwayLedgerStateTranslation + utxoTablesAreEmpty + (\st -> cover 50 (nonEmptyUtxosShelley st) "UTxO set is not empty") + ] + + +{------------------------------------------------------------------------------- + Ledger-state translations between eras that we test in this module +-------------------------------------------------------------------------------} + +-- | TODO: we should simply expose 'translateLedgerStateByronToShelleyWrapper' +-- and other translations in ' Ouroboros.Consensus.Cardano.CanHardFork'. +byronToShelleyLedgerStateTranslation :: + RequiringBoth + WrapLedgerConfig + TranslateLedgerState + ByronBlock + (ShelleyBlock (TPraos Crypto) (ShelleyEra Crypto)) +shelleyToAllegraLedgerStateTranslation :: RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (TPraos Crypto) (ShelleyEra Crypto)) + (ShelleyBlock (TPraos Crypto) (AllegraEra Crypto)) +allegraToMaryLedgerStateTranslation :: RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (TPraos Crypto) (AllegraEra Crypto)) + (ShelleyBlock (TPraos Crypto) (MaryEra Crypto)) +maryToAlonzoLedgerStateTranslation :: RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (TPraos Crypto) (MaryEra Crypto)) + (ShelleyBlock (TPraos Crypto) (AlonzoEra Crypto)) +alonzoToBabbageLedgerStateTranslation :: RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (TPraos Crypto) (AlonzoEra Crypto)) + (ShelleyBlock (Praos Crypto) (BabbageEra Crypto)) +PCons byronToShelleyLedgerStateTranslation + (PCons shelleyToAllegraLedgerStateTranslation + (PCons allegraToMaryLedgerStateTranslation + (PCons maryToAlonzoLedgerStateTranslation + (PCons alonzoToBabbageLedgerStateTranslation + (PCons _ + PNil))))) = tls + where + tls :: InPairs + (RequiringBoth WrapLedgerConfig TranslateLedgerState) + (CardanoEras Crypto) + tls = translateLedgerState hardForkEraTranslation + +babbageToConwayLedgerStateTranslation :: RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (Praos Crypto) (BabbageEra Crypto)) + (ShelleyBlock (Praos Crypto) (ConwayEra Crypto)) +babbageToConwayLedgerStateTranslation = translateLedgerStateBabbageToConwayWrapper + +-- | Tech debt: The babbage to conway translation performs a tick, and we would +-- need to create a reasonable ledger state. Instead this is just a copy-paste +-- of the code without the tick. +-- +-- This should be fixed once the real translation is fixed. 
+translateLedgerStateBabbageToConwayWrapper :: + RequiringBoth + WrapLedgerConfig + TranslateLedgerState + (ShelleyBlock (Praos Crypto) (BabbageEra Crypto)) + (ShelleyBlock (Praos Crypto) (ConwayEra Crypto)) +translateLedgerStateBabbageToConwayWrapper = + RequireBoth $ \_ cfgConway -> + TranslateLedgerState $ \_ -> + noNewTickingDiffs + . unFlip + . unComp + . Core.translateEra' (getConwayTranslationContext cfgConway) + . Comp + . Flip + +-- | Check that the tables are correctly translated from one era to the next. +testTablesTranslation :: + forall srcBlk dstBlk. + ( Arbitrary (TestSetup srcBlk dstBlk) + , Show (LedgerCfg (LedgerState srcBlk)) + , Show (LedgerCfg (LedgerState dstBlk)) + , Show (LedgerState srcBlk EmptyMK) + ) + => String + -- ^ Property label + -> RequiringBoth + WrapLedgerConfig + TranslateLedgerState + srcBlk + dstBlk + -> (LedgerState srcBlk EmptyMK -> LedgerState dstBlk DiffMK -> Bool) + -> (LedgerState srcBlk EmptyMK -> Property -> Property) + -- ^ Coverage testing function + -> TestTree +testTablesTranslation propLabel translateWithConfig translationShouldSatisfy ledgerStateShouldCover = + testProperty propLabel withTestSetup + where + withTestSetup :: TestSetup srcBlk dstBlk -> Property + withTestSetup ts = + checkCoverage $ ledgerStateShouldCover tsSrcLedgerState + $ property + $ translationShouldSatisfy tsSrcLedgerState destState + where + TestSetup {tsSrcLedgerConfig, tsDestLedgerConfig, tsSrcLedgerState, tsEpochNo} = ts + destState = translateLedgerStateWith translation tsEpochNo tsSrcLedgerState + where + translation :: TranslateLedgerState srcBlk dstBlk + translation = provideBoth translateWithConfig + (WrapLedgerConfig tsSrcLedgerConfig) + (WrapLedgerConfig tsDestLedgerConfig) + +{------------------------------------------------------------------------------- + Specific predicates +-------------------------------------------------------------------------------} + +byronUtxosAreInsertsInShelleyUtxoDiff + :: LedgerState ByronBlock EmptyMK + -> LedgerState (ShelleyBlock Proto (ShelleyEra Crypto)) DiffMK + -> Bool +byronUtxosAreInsertsInShelleyUtxoDiff srcLedgerState destLedgerState = + toNextUtxoDiff srcLedgerState == extractUtxoDiff destLedgerState + where + toNextUtxoDiff + :: LedgerState ByronBlock mk + -> Diff.Diff (TxIn Crypto) (Core.TxOut (ShelleyEra Crypto)) + toNextUtxoDiff ledgerState = + let + Byron.UTxO utxo = Byron.cvsUtxo $ byronLedgerState ledgerState + keyFn = translateTxInByronToShelley . Byron.fromCompactTxIn + valFn = Diff.Insert . translateCompactTxOutByronToShelley + in + Diff.Diff $ dimap keyFn valFn utxo + + translateTxInByronToShelley :: Byron.TxIn -> TxIn Crypto + translateTxInByronToShelley byronTxIn = + let + Byron.TxInUtxo txId txIx = byronTxIn + shelleyTxId' = translateTxIdByronToShelley txId + in + TxIn shelleyTxId' (TxIx $ fromIntegral txIx) + +shelleyAvvmAddressesAreDeletesInUtxoDiff + :: LedgerState (ShelleyBlock Proto (ShelleyEra Crypto)) EmptyMK + -> LedgerState (ShelleyBlock Proto (AllegraEra Crypto)) DiffMK + -> Bool +shelleyAvvmAddressesAreDeletesInUtxoDiff srcLedgerState destLedgerState = + toNextUtxoDiff srcLedgerState == extractUtxoDiff destLedgerState + where + toNextUtxoDiff + :: LedgerState (ShelleyBlock Proto (ShelleyEra Crypto)) EmptyMK + -> Diff.Diff (TxIn Crypto) (Core.TxOut (AllegraEra Crypto)) + toNextUtxoDiff = avvmAddressesToUtxoDiff . stashedAVVMAddresses . 
shelleyLedgerState + avvmAddressesToUtxoDiff (UTxO m) = Diff.Diff $ dimap id (\_ -> Diff.Delete) m + +utxoTablesAreEmpty + :: LedgerState (ShelleyBlock srcProto srcEra) EmptyMK + -> LedgerState (ShelleyBlock destProto destEra) DiffMK + -> Bool +utxoTablesAreEmpty _ destLedgerState = Diff.null $ extractUtxoDiff destLedgerState + +nonEmptyUtxosByron :: LedgerState ByronBlock EmptyMK -> Bool +nonEmptyUtxosByron ledgerState = + let Byron.UTxO utxo = Byron.cvsUtxo $ byronLedgerState ledgerState + in not $ Map.null utxo + +nonEmptyUtxosShelley :: LedgerState (ShelleyBlock proto era) EmptyMK -> Bool +nonEmptyUtxosShelley ledgerState = + let UTxO m = utxosUtxo $ lsUTxOState $ esLState $ nesEs $ shelleyLedgerState ledgerState + in not $ Map.null m + +nonEmptyAvvmAddresses :: LedgerState (ShelleyBlock Proto (ShelleyEra Crypto)) EmptyMK -> Bool +nonEmptyAvvmAddresses ledgerState = + let UTxO m = stashedAVVMAddresses $ shelleyLedgerState ledgerState + in not $ Map.null m + +{------------------------------------------------------------------------------- + Utilities +-------------------------------------------------------------------------------} + +extractUtxoDiff + :: LedgerState (ShelleyBlock proto era) DiffMK + -> Diff (TxIn (EraCrypto era)) (Core.TxOut era) +extractUtxoDiff shelleyLedgerState = + let DiffMK tables = getLedgerTables $ shelleyLedgerTables shelleyLedgerState + in tables + +{------------------------------------------------------------------------------- + TestSetup +-------------------------------------------------------------------------------} + +data TestSetup src dest = TestSetup { + tsSrcLedgerConfig :: LedgerConfig src + , tsDestLedgerConfig :: LedgerConfig dest + , tsSrcLedgerState :: LedgerState src EmptyMK + , tsEpochNo :: EpochNo +} + +deriving instance ( Show (LedgerConfig src) + , Show (LedgerConfig dest) + , Show (LedgerState src EmptyMK)) => Show (TestSetup src dest) + +instance Arbitrary (TestSetup ByronBlock (ShelleyBlock Proto (ShelleyEra Crypto))) where + arbitrary = + let ledgerConfig = fixedShelleyLedgerConfig emptyFromByronTranslationContext + in TestSetup <$> genByronLedgerConfig + <*> pure ledgerConfig + <*> genByronLedgerState + <*> (EpochNo <$> arbitrary) + +instance Arbitrary (TestSetup (ShelleyBlock Proto (ShelleyEra Crypto)) + (ShelleyBlock Proto (AllegraEra Crypto))) where + arbitrary = TestSetup (fixedShelleyLedgerConfig emptyFromByronTranslationContext) + (fixedShelleyLedgerConfig Genesis.NoGenesis) + <$> genShelleyLedgerState + <*> (EpochNo <$> arbitrary) + +instance Arbitrary (TestSetup (ShelleyBlock Proto (AllegraEra Crypto)) + (ShelleyBlock Proto (MaryEra Crypto))) where + arbitrary = TestSetup (fixedShelleyLedgerConfig Genesis.NoGenesis) + (fixedShelleyLedgerConfig Genesis.NoGenesis) + <$> genShelleyLedgerState + <*> (EpochNo <$> arbitrary) + +instance Arbitrary (TestSetup (ShelleyBlock Proto (MaryEra Crypto)) + (ShelleyBlock Proto (AlonzoEra Crypto))) where + arbitrary = TestSetup (fixedShelleyLedgerConfig Genesis.NoGenesis) + <$> (fixedShelleyLedgerConfig <$> arbitrary) + <*> genShelleyLedgerState + <*> (EpochNo <$> arbitrary) + +instance Arbitrary (TestSetup (ShelleyBlock (TPraos Crypto) (AlonzoEra Crypto)) + (ShelleyBlock (Praos Crypto) (BabbageEra Crypto))) where + arbitrary = TestSetup <$> (fixedShelleyLedgerConfig <$> arbitrary) + <*> (pure $ fixedShelleyLedgerConfig Genesis.NoGenesis) + <*> genShelleyLedgerState + <*> (EpochNo <$> arbitrary) + +instance Arbitrary (TestSetup (ShelleyBlock (Praos Crypto) (BabbageEra Crypto)) + (ShelleyBlock 
(Praos Crypto) (ConwayEra Crypto))) where + arbitrary = TestSetup <$> (pure $ fixedShelleyLedgerConfig Genesis.NoGenesis) + <*> (fixedShelleyLedgerConfig <$> arbitrary) + <*> genShelleyLedgerState + <*> (EpochNo <$> arbitrary) + +{------------------------------------------------------------------------------- + Generators +-------------------------------------------------------------------------------} + +genShelleyLedgerState :: CanMock proto era => Gen (LedgerState (ShelleyBlock proto era) EmptyMK) +genShelleyLedgerState = arbitrary + +-- | A fixed ledger config should be sufficient as the updating of the ledger +-- tables on era transitions does not depend on the configurations of any of +-- the ledgers involved. +fixedShelleyLedgerConfig :: (Crypto.Crypto (EraCrypto era)) => Core.TranslationContext era -> ShelleyLedgerConfig era +fixedShelleyLedgerConfig translationContext = mkShelleyLedgerConfig + shelleyGenesis + translationContext + (fixedEpochInfo (sgEpochLength shelleyGenesis) (slotLengthFromSec 2)) + where + shelleyGenesis = ShelleyGenesis { + sgSystemStart = dawnOfTime + , sgNetworkMagic = 0 + , sgNetworkId = Testnet + , sgActiveSlotsCoeff = unsafeBoundRational 0.8 + , sgSecurityParam = 10 + , sgEpochLength = 10 + , sgSlotsPerKESPeriod = 10 + , sgMaxKESEvolutions = 10 + , sgSlotLength = 10 + , sgUpdateQuorum = 6 + , sgMaxLovelaceSupply = 10 + , sgProtocolParams = emptyPParams + , sgGenDelegs = Map.empty + , sgInitialFunds = ListMap.empty + , sgStaking = ShelleyGenesisStaking ListMap.empty ListMap.empty + } diff --git a/ouroboros-consensus-cardano/test/cardano-test/Test/ThreadNet/Cardano.hs b/ouroboros-consensus-cardano/test/cardano-test/Test/ThreadNet/Cardano.hs index f0dccc8304..a2c0c5623b 100644 --- a/ouroboros-consensus-cardano/test/cardano-test/Test/ThreadNet/Cardano.hs +++ b/ouroboros-consensus-cardano/test/cardano-test/Test/ThreadNet/Cardano.hs @@ -30,6 +30,7 @@ import Data.Maybe (maybeToList) import Data.Proxy (Proxy (..)) import Data.Set (Set) import qualified Data.Set as Set +import Data.SOP.Functors import Data.Word (Word64) import Lens.Micro import Ouroboros.Consensus.Block.Forging (BlockForging) @@ -51,6 +52,7 @@ import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.NodeId import Ouroboros.Consensus.Protocol.PBFT +import Ouroboros.Consensus.Shelley.HFEras () import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () import Ouroboros.Consensus.Shelley.Node import Ouroboros.Consensus.Util.IOLike (IOLike) @@ -529,9 +531,9 @@ setByronProtVer = modifyExtLedger f elgr = elgr { ledgerState = f (ledgerState elgr ) } modifyHFLedgerState :: - (LedgerState x -> LedgerState x) - -> LedgerState (HardForkBlock (x : xs)) - -> LedgerState (HardForkBlock (x : xs)) + (LedgerState x mk -> LedgerState x mk) + -> LedgerState (HardForkBlock (x : xs)) mk + -> LedgerState (HardForkBlock (x : xs)) mk modifyHFLedgerState f (HardForkLedgerState (HardForkState (TZ st))) = - HardForkLedgerState (HardForkState (TZ st {currentState = f (currentState st)})) + HardForkLedgerState (HardForkState (TZ st {currentState = Flip $ f (unFlip $ currentState st)})) modifyHFLedgerState _ st = st diff --git a/ouroboros-consensus-cardano/test/shelley-test/Main.hs b/ouroboros-consensus-cardano/test/shelley-test/Main.hs index f50c0c264d..302ef96b0d 100644 --- a/ouroboros-consensus-cardano/test/shelley-test/Main.hs +++ b/ouroboros-consensus-cardano/test/shelley-test/Main.hs @@ -2,6 +2,7 @@ module Main (main) where import qualified 
Test.Consensus.Shelley.Coherence (tests) import qualified Test.Consensus.Shelley.Golden (tests) +import qualified Test.Consensus.Shelley.LedgerTables (tests) import qualified Test.Consensus.Shelley.Serialisation (tests) import qualified Test.Consensus.Shelley.SupportedNetworkProtocolVersion (tests) import Test.Tasty @@ -17,6 +18,7 @@ tests = testGroup "shelley" [ Test.Consensus.Shelley.Coherence.tests , Test.Consensus.Shelley.Golden.tests + , Test.Consensus.Shelley.LedgerTables.tests , Test.Consensus.Shelley.Serialisation.tests , Test.Consensus.Shelley.SupportedNetworkProtocolVersion.tests , Test.ThreadNet.Shelley.tests diff --git a/ouroboros-consensus-cardano/test/shelley-test/Test/Consensus/Shelley/LedgerTables.hs b/ouroboros-consensus-cardano/test/shelley-test/Test/Consensus/Shelley/LedgerTables.hs new file mode 100644 index 0000000000..002af6a555 --- /dev/null +++ b/ouroboros-consensus-cardano/test/shelley-test/Test/Consensus/Shelley/LedgerTables.hs @@ -0,0 +1,64 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Consensus.Shelley.LedgerTables (tests) where + +import Cardano.Crypto.Hash (ShortHash) +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Protocol.Praos (Praos) +import Ouroboros.Consensus.Protocol.TPraos (TPraos) +import Ouroboros.Consensus.Shelley.Eras +import Ouroboros.Consensus.Shelley.HFEras () +import Ouroboros.Consensus.Shelley.Ledger +import Ouroboros.Consensus.Shelley.Ledger.SupportsProtocol () +import Test.Cardano.Ledger.Alonzo.Serialisation.Generators () +import Test.Cardano.Ledger.Babbage.Arbitrary () +import Test.Cardano.Ledger.Babbage.Serialisation.Generators () +import Test.Cardano.Ledger.Conway.Arbitrary () +import Test.Consensus.Shelley.Generators () +import Test.Consensus.Shelley.MockCrypto (CanMock, MockCrypto) +import Test.LedgerTables +import Test.Tasty +import Test.Tasty.QuickCheck + +type Crypto = MockCrypto ShortHash +type Proto = TPraos Crypto + +tests :: TestTree +tests = testGroup "LedgerTables" + [ testGroup "Shelley" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock Proto (ShelleyEra Crypto))) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @(ShelleyBlock Proto (ShelleyEra Crypto))) + ] + , testGroup "Allegra" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock Proto (AllegraEra Crypto))) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @(ShelleyBlock Proto (AllegraEra Crypto))) + ] + , testGroup "Mary" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock Proto (MaryEra Crypto))) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @(ShelleyBlock Proto (MaryEra Crypto))) + ] + , testGroup "Alonzo" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock Proto (AlonzoEra Crypto))) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @(ShelleyBlock Proto (AlonzoEra Crypto))) + ] + , testGroup "Babbage" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock (Praos StandardCrypto) (BabbageEra StandardCrypto))) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @(ShelleyBlock (Praos StandardCrypto) (BabbageEra StandardCrypto))) + ] + , testGroup "Conway" + [ testProperty "Stowable laws" (prop_stowable_laws @(ShelleyBlock (Praos StandardCrypto) (ConwayEra StandardCrypto))) + , testProperty "HasLedgerTables 
laws" (prop_hasledgertables_laws @(ShelleyBlock (Praos StandardCrypto) (ConwayEra StandardCrypto))) + ] + ] + + +instance ( CanMock proto era + , Arbitrary (LedgerState (ShelleyBlock proto era) EmptyMK) + ) => Arbitrary (LedgerTables (LedgerState (ShelleyBlock proto era)) ValuesMK) where + arbitrary = projectLedgerTables . unstowLedgerTables <$> arbitrary diff --git a/ouroboros-consensus-cardano/test/shelley-test/Test/ThreadNet/Shelley.hs b/ouroboros-consensus-cardano/test/shelley-test/Test/ThreadNet/Shelley.hs index c75fb66f8f..8e3b30b396 100644 --- a/ouroboros-consensus-cardano/test/shelley-test/Test/ThreadNet/Shelley.hs +++ b/ouroboros-consensus-cardano/test/shelley-test/Test/ThreadNet/Shelley.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE TypeApplications #-} @@ -352,7 +353,7 @@ prop_simple_real_tpraos_convergence TestSetup DoGeneratePPUs -> True DoNotGeneratePPUs -> False - finalLedgers :: [(NodeId, LedgerState (ShelleyBlock Proto Era))] + finalLedgers :: [(NodeId, LedgerState (ShelleyBlock Proto Era) EmptyMK)] finalLedgers = Map.toList $ nodeOutputFinalLedger <$> testOutputNodes testOutput diff --git a/ouroboros-consensus-cardano/test/tools-test/Main.hs b/ouroboros-consensus-cardano/test/tools-test/Main.hs index 0ff98843e6..3cfea1b2a5 100644 --- a/ouroboros-consensus-cardano/test/tools-test/Main.hs +++ b/ouroboros-consensus-cardano/test/tools-test/Main.hs @@ -63,6 +63,7 @@ testAnalyserConfig :: DBAnalyserConfig testAnalyserConfig = DBAnalyserConfig { dbDir = chainDB + , ldbBackend = V2InMem , verbose = False , selectDB = SelectImmutableDB Origin , validation = Just ValidateAllBlocks @@ -73,7 +74,7 @@ testAnalyserConfig = testBlockArgs :: Cardano.Args (CardanoBlock StandardCrypto) testBlockArgs = Cardano.CardanoBlockArgs nodeConfig Nothing --- | A multi-step test including synthesis and analaysis 'SomeConsensusProtocol' using the Cardano instance. +-- | A multi-step test including synthesis and analysis 'SomeConsensusProtocol' using the Cardano instance. -- -- 1. step: synthesize a ChainDB from scratch and count the amount of blocks forged. -- 2. step: append to the previous ChainDB and coutn the amount of blocks forged. 
@@ -109,7 +110,7 @@ blockCountTest logStep = do "wrong number of blocks encountered during analysis \ \ (counted: " ++ show resultAnalysis ++ "; expected: " ++ show blockCount ++ ")" where - genTxs _ _ _ = pure [] + genTxs _ _ _ _ = pure [] tests :: TestTree tests = diff --git a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal index c255270097..2e87036f13 100644 --- a/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal +++ b/ouroboros-consensus-diffusion/ouroboros-consensus-diffusion.cabal @@ -103,6 +103,7 @@ library text, time, transformers, + transformers-base, typed-protocols, typed-protocols-stateful, @@ -157,6 +158,7 @@ library unstable-diffusion-testlib strict-sop-core ^>=0.1, strict-stm, text, + transformers-base, typed-protocols, library unstable-mock-testlib @@ -201,6 +203,7 @@ test-suite mock-test main-is: Main.hs other-modules: Test.Consensus.Ledger.Mock + Test.Consensus.Ledger.Mock.LedgerTables Test.ThreadNet.BFT Test.ThreadNet.LeaderSchedule Test.ThreadNet.PBFT @@ -280,6 +283,7 @@ test-suite consensus-test base, binary, bytestring, + cardano-binary, cardano-crypto-class, cardano-slotting:{cardano-slotting, testlib}, cardano-strict-containers, @@ -316,6 +320,7 @@ test-suite consensus-test tasty-quickcheck, temporary, time, + transformers-base, tree-diff, typed-protocols, unstable-diffusion-testlib, diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToClient.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToClient.hs index bae1f4ce6a..cecaade4a2 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToClient.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Network/NodeToClient.hs @@ -1,9 +1,11 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE ExistentialQuantification #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE PolyKinds #-} {-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -41,6 +43,7 @@ import Codec.Serialise (Serialise) import Control.ResourceRegistry import Control.Tracer import Data.ByteString.Lazy (ByteString) +import Data.Typeable import Data.Void (Void) import Network.TypedProtocol.Codec import qualified Network.TypedProtocol.Stateful.Codec as Stateful @@ -62,7 +65,6 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import Ouroboros.Consensus.Util (ShowProxy) import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.Orphans () -import qualified Ouroboros.Network.AnchoredFragment as AF import Ouroboros.Network.Block (Serialised, decodePoint, decodeTip, encodePoint, encodeTip) import Ouroboros.Network.BlockFetch @@ -101,7 +103,8 @@ data Handlers m peer blk = Handlers { :: LocalTxSubmissionServer (GenTx blk) (ApplyTxErr blk) m () , hStateQueryServer - :: LocalStateQueryServer blk (Point blk) (Query blk) m () + :: ResourceRegistry m + -> LocalStateQueryServer blk (Point blk) (Query blk) m () , hTxMonitorServer :: LocalTxMonitorServer (GenTxId blk) (GenTx blk) SlotNo m () @@ -129,12 +132,8 @@ mkHandlers NodeKernelArgs {cfg, tracers} NodeKernel {getChainDB, getMempool} = (Node.localTxSubmissionServerTracer tracers) getMempool , hStateQueryServer = - localStateQueryServer - (ExtLedgerCfg cfg) - 
(ChainDB.getTipPoint getChainDB) - (ChainDB.getPastLedger getChainDB) - (castPoint . AF.anchorPoint <$> ChainDB.getCurrentChain getChainDB) - + localStateQueryServer (ExtLedgerCfg cfg) + . ChainDB.getReadOnlyForkerAtPoint getChainDB , hTxMonitorServer = localTxMonitorServer getMempool @@ -178,7 +177,7 @@ type ClientCodecs blk m = defaultCodecs :: forall m blk. ( MonadST m , SerialiseNodeToClientConstraints blk - , ShowQuery (BlockQuery blk) + , forall fp. ShowQuery (BlockQuery blk fp) , StandardHash blk , Serialise (HeaderHash blk) ) @@ -209,7 +208,7 @@ defaultCodecs ccfg version networkVersion = Codecs { (encodePoint (encodeRawHash p)) (decodePoint (decodeRawHash p)) (queryEncodeNodeToClient ccfg queryVersion version . SomeSecond) - ((\(SomeSecond qry) -> Some qry) <$> queryDecodeNodeToClient ccfg queryVersion version) + ((\(SomeSecond q) -> Some q) <$> queryDecodeNodeToClient ccfg queryVersion version) (encodeResult ccfg version) (decodeResult ccfg version) @@ -238,7 +237,7 @@ defaultCodecs ccfg version networkVersion = Codecs { clientCodecs :: forall m blk. ( MonadST m , SerialiseNodeToClientConstraints blk - , ShowQuery (BlockQuery blk) + , forall fp. ShowQuery (BlockQuery blk fp) , StandardHash blk , Serialise (HeaderHash blk) ) @@ -269,7 +268,7 @@ clientCodecs ccfg version networkVersion = Codecs { (encodePoint (encodeRawHash p)) (decodePoint (decodeRawHash p)) (queryEncodeNodeToClient ccfg queryVersion version . SomeSecond) - ((\(SomeSecond qry) -> Some qry) <$> queryDecodeNodeToClient ccfg queryVersion version) + ((\(SomeSecond q) -> Some q) <$> queryDecodeNodeToClient ccfg queryVersion version) (encodeResult ccfg version) (decodeResult ccfg version) @@ -347,7 +346,7 @@ showTracers :: ( Show peer , Show (GenTx blk) , Show (GenTxId blk) , Show (ApplyTxErr blk) - , ShowQuery (BlockQuery blk) + , forall fp. ShowQuery (BlockQuery blk fp) , HasHeader blk ) => Tracer m String -> Tracers m peer blk e @@ -389,10 +388,10 @@ mkApps :: , Exception e , ShowProxy blk , ShowProxy (ApplyTxErr blk) - , ShowProxy (BlockQuery blk) , ShowProxy (GenTx blk) , ShowProxy (GenTxId blk) - , ShowQuery (BlockQuery blk) + , ShowProxy (Query blk) + , forall fp. 
ShowQuery (BlockQuery blk fp) ) => NodeKernel m addrNTN addrNTC blk -> Tracers m addrNTC blk e @@ -437,12 +436,13 @@ mkApps kernel Tracers {..} Codecs {..} Handlers {..} = -> m ((), Maybe bSQ) aStateQueryServer them channel = do labelThisThread "LocalStateQueryServer" - Stateful.runPeer - (contramap (TraceLabelPeer them) tStateQueryTracer) - cStateQueryCodec - channel - LocalStateQuery.StateIdle - (localStateQueryServerPeer hStateQueryServer) + withRegistry $ \rr -> + Stateful.runPeer + (contramap (TraceLabelPeer them) tStateQueryTracer) + cStateQueryCodec + channel + LocalStateQuery.StateIdle + (localStateQueryServerPeer (hStateQueryServer rr)) aTxMonitorServer :: addrNTC diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs index 2f7427958b..9cf6df34a5 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node.hs @@ -10,6 +10,7 @@ {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TemplateHaskell #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE TypeApplications #-} @@ -38,7 +39,6 @@ module Ouroboros.Consensus.Node ( , ChainDB.RelativeMountPoint (..) , ChainDB.TraceEvent (..) , ChainDbArgs (..) - , DiskPolicyArgs (..) , HardForkBlockchainTimeArgs (..) , LastShutDownWasClean (..) , LowLevelRunNodeArgs (..) @@ -49,6 +49,7 @@ module Ouroboros.Consensus.Node ( , ProtocolInfo (..) , RunNode , RunNodeArgs (..) + , SnapshotPolicyArgs (..) , Tracers , Tracers' (..) -- * Internal helpers @@ -63,6 +64,7 @@ import Codec.Serialise (DeserialiseFailure) import qualified Control.Concurrent.Class.MonadSTM.Strict as StrictSTM import Control.DeepSeq (NFData) import Control.Monad (forM_, when) +import Control.Monad.Base (MonadBase) import Control.Monad.Class.MonadTime.SI (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.ResourceRegistry @@ -70,6 +72,7 @@ import Control.Tracer (Tracer, contramap, traceWith) import Data.ByteString.Lazy (ByteString) import Data.Functor.Contravariant (Predicate (..)) import Data.Hashable (Hashable) +import Data.Kind (Type) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (fromMaybe, isNothing) @@ -79,6 +82,7 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.BlockchainTime hiding (getSystemStart) import Ouroboros.Consensus.Config import Ouroboros.Consensus.Config.SupportsNode +import Ouroboros.Consensus.Ledger.Basics (ValuesMK) import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState (..)) import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.HistoricityCheck (HistoricityCheck) @@ -106,8 +110,8 @@ import Ouroboros.Consensus.Storage.ChainDB (ChainDB, ChainDbArgs, TraceEvent) import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB -import Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy - (DiskPolicyArgs (..)) +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots import Ouroboros.Consensus.Util.Args import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.Orphans () @@ -168,7 +172,14 @@ import System.Random (StdGen, newStdGen, randomIO, split) -- | 
Arguments expected from any invocation of 'runWith', whether by deployed -- code, tests, etc. -data RunNodeArgs m addrNTN addrNTC blk (p2p :: Diffusion.P2P) = RunNodeArgs { +type RunNodeArgs :: + (Type -> Type) + -> Type + -> Type + -> Type + -> Diffusion.P2P + -> Type +data RunNodeArgs m addrNTN addrNTC blk p2p = RunNodeArgs { -- | Consensus tracers rnTraceConsensus :: Tracers m (ConnectionId addrNTN) (ConnectionId addrNTC) blk @@ -207,8 +218,16 @@ data RunNodeArgs m addrNTN addrNTC blk (p2p :: Diffusion.P2P) = RunNodeArgs { -- 'runWith'. The @cardano-node@, for example, instead calls the 'run' -- abbreviation, which uses 'stdLowLevelRunNodeArgsIO' to indirectly specify -- these low-level values from the higher-level 'StdRunNodeArgs'. -data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk - (p2p :: Diffusion.P2P) = +type LowLevelRunNodeArgs :: + (Type -> Type) + -> Type + -> Type + -> Type + -> Type + -> Type + -> Diffusion.P2P + -> Type +data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk p2p = LowLevelRunNodeArgs { -- | An action that will receive a marker indicating whether the previous @@ -231,7 +250,9 @@ data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk -- be created. , llrnMkVolatileHasFS :: ChainDB.RelativeMountPoint -> SomeHasFS m - -- | Customise the 'ChainDbArgs' + -- | Customise the 'ChainDbArgs'. 'StdRunNodeArgs' will use this field to + -- set various options that are exposed in @cardano-node@ configuration + -- files. , llrnCustomiseChainDbArgs :: Complete ChainDbArgs m blk -> Complete ChainDbArgs m blk @@ -289,6 +310,9 @@ data LowLevelRunNodeArgs m addrNTN addrNTC versionDataNTN versionDataNTC blk , llrnMaxClockSkew :: InFutureCheck.ClockSkew , llrnPublicPeerSelectionStateVar :: StrictSTM.StrictTVar m (Diffusion.PublicPeerSelectionState addrNTN) + + -- | The flavor arguments + , llrnLdbFlavorArgs :: Complete LedgerDbFlavorArgs m } data NodeDatabasePaths = @@ -321,7 +345,6 @@ data StdRunNodeArgs m blk (p2p :: Diffusion.P2P) = StdRunNodeArgs , srnBfcMaxConcurrencyDeadline :: Maybe Word , srnChainDbValidateOverride :: Bool -- ^ If @True@, validate the ChainDB on init no matter what - , srnDiskPolicyArgs :: DiskPolicyArgs , srnDatabasePath :: NodeDatabasePaths -- ^ Location of the DBs , srnDiffusionArguments :: Diffusion.Arguments @@ -344,6 +367,10 @@ data StdRunNodeArgs m blk (p2p :: Diffusion.P2P) = StdRunNodeArgs -- capacity of the mempool. , srnChainSyncTimeout :: Maybe (m NTN.ChainSyncTimeout) -- ^ A custom timeout for ChainSync. + + -- Ad hoc values to replace default ChainDB configurations + , srnSnapshotPolicyArgs :: SnapshotPolicyArgs + , srnLdbFlavorArgs :: Complete LedgerDbFlavorArgs m -- TODO this will contain a fs?? it should probably not as the node doesn't know about those } {------------------------------------------------------------------------------- @@ -367,8 +394,9 @@ run :: forall blk p2p. => RunNodeArgs IO RemoteAddress LocalAddress blk p2p -> StdRunNodeArgs IO blk p2p -> IO () -run args stdArgs = stdLowLevelRunNodeArgsIO args stdArgs >>= runWith args encodeRemoteAddress decodeRemoteAddress - +run args stdArgs = + stdLowLevelRunNodeArgsIO args stdArgs + >>= runWith args encodeRemoteAddress decodeRemoteAddress -- | Extra constraints used by `ouroboros-network`. -- @@ -398,6 +426,7 @@ runWith :: forall m addrNTN addrNTC versionDataNTN versionDataNTC blk p2p. 
, Hashable addrNTN -- the constraint comes from `initNodeKernel` , NetworkIO m , NetworkAddr addrNTN + , MonadBase m m ) => RunNodeArgs m addrNTN addrNTC blk p2p -> (NodeToNodeVersion -> addrNTN -> CBOR.Encoding) @@ -412,15 +441,13 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = -- Ignore exception thrown in connection handlers and diffusion. -- Also ignore 'ExitSuccess'. (runPredicate $ - (Predicate $ \err -> - (case fromException @ExceptionInLinkedThread err of + Predicate ( \err -> + case fromException @ExceptionInLinkedThread err of Just (ExceptionInLinkedThread _ err') - -> maybe True (/= ExitSuccess) $ fromException err' - Nothing -> False)) - <> (Predicate $ \err -> - isNothing (fromException @ExceptionInHandler err)) - <> (Predicate $ \err -> - isNothing (fromException @Diffusion.Failure err)) + -> (/= Just ExitSuccess) $ fromException err' + Nothing -> False) + <> Predicate (isNothing . fromException @ExceptionInHandler) + <> Predicate (isNothing . fromException @Diffusion.Failure) ) (\err -> traceWith (consensusErrorTracer rnTraceConsensus) err >> throwIO err @@ -453,6 +480,7 @@ runWith RunNodeArgs{..} encAddrNtN decAddrNtN LowLevelRunNodeArgs{..} = initLedger llrnMkImmutableHasFS llrnMkVolatileHasFS + llrnLdbFlavorArgs llrnChainDbArgsDefaults ( setLoEinChainDbArgs . maybeValidateAll @@ -707,21 +735,22 @@ stdWithCheckedDB pb tracer databasePath networkMagic body = do hasFS = ioHasFS mountPoint openChainDB :: - forall m blk. (RunNode blk, IOLike m) + forall m blk. (RunNode blk, IOLike m, MonadBase m m) => ResourceRegistry m -> TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk ValuesMK -- ^ Initial ledger -> (ChainDB.RelativeMountPoint -> SomeHasFS m) -- ^ Immutable FS, see 'NodeDatabasePaths' -> (ChainDB.RelativeMountPoint -> SomeHasFS m) -- ^ Volatile FS, see 'NodeDatabasePaths' + -> Complete LedgerDbFlavorArgs m -> Incomplete ChainDbArgs m blk -- ^ A set of default arguments (possibly modified from 'defaultArgs') -> (Complete ChainDbArgs m blk -> Complete ChainDbArgs m blk) -- ^ Customise the 'ChainDbArgs' -> m (ChainDB m blk, Complete ChainDbArgs m blk) -openChainDB registry cfg initLedger fsImm fsVol defArgs customiseArgs = +openChainDB registry cfg initLedger fsImm fsVol flavorArgs defArgs customiseArgs = let args = customiseArgs $ ChainDB.completeChainDbArgs registry cfg @@ -730,6 +759,7 @@ openChainDB registry cfg initLedger fsImm fsVol defArgs customiseArgs = (nodeCheckIntegrity (configStorage cfg)) fsImm fsVol + flavorArgs defArgs in (,args) <$> ChainDB.openDB args @@ -885,7 +915,7 @@ stdRunDataDiffusion = Diffusion.run -- | Conveniently packaged 'LowLevelRunNodeArgs' arguments from a standard -- non-testing invocation. stdLowLevelRunNodeArgsIO :: - forall blk p2p. RunNode blk + forall blk p2p . RunNode blk => RunNodeArgs IO RemoteAddress LocalAddress blk p2p -> StdRunNodeArgs IO blk p2p -> IO (LowLevelRunNodeArgs @@ -955,6 +985,8 @@ stdLowLevelRunNodeArgsIO RunNodeArgs{ rnProtocolInfo InFutureCheck.defaultClockSkew , llrnPublicPeerSelectionStateVar = Diffusion.daPublicPeerSelectionVar srnDiffusionArguments + , llrnLdbFlavorArgs = + srnLdbFlavorArgs } where networkMagic :: NetworkMagic @@ -964,13 +996,12 @@ stdLowLevelRunNodeArgsIO RunNodeArgs{ rnProtocolInfo Incomplete ChainDbArgs IO blk -> Incomplete ChainDbArgs IO blk updateChainDbDefaults = - ChainDB.updateDiskPolicyArgs srnDiskPolicyArgs + ChainDB.updateSnapshotPolicyArgs srnSnapshotPolicyArgs . ChainDB.updateTracer srnTraceChainDB . 
(if not srnChainDbValidateOverride then id else ChainDB.ensureValidateAll) - llrnCustomiseNodeKernelArgs :: NodeKernelArgs m addrNTN (ConnectionId addrNTC) blk -> NodeKernelArgs m addrNTN (ConnectionId addrNTC) blk diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs index 129e15f0c5..e6b567fcf6 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/Node/GSM.hs @@ -165,7 +165,7 @@ initializationGsmState :: ( L.GetTip (L.LedgerState blk) , Monad m ) - => m (L.LedgerState blk) + => m (L.LedgerState blk L.EmptyMK) -> Maybe (WrapDurationUntilTooOld m blk) -- ^ 'Nothing' if @blk@ has no age limit -> MarkerFileView m @@ -446,7 +446,7 @@ realDurationUntilTooOld :: , MonadSTM m ) => L.LedgerConfig blk - -> STM m (L.LedgerState blk) + -> STM m (L.LedgerState blk L.EmptyMK) -> NominalDiffTime -- ^ If the volatile tip is older than this, then the node will exit the -- @CaughtUp@ state. diff --git a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs index 224cba0d08..175ab50cef 100644 --- a/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs +++ b/ouroboros-consensus-diffusion/src/ouroboros-consensus-diffusion/Ouroboros/Consensus/NodeKernel.hs @@ -42,6 +42,7 @@ import Data.Function (on) import Data.Functor ((<&>)) import Data.Hashable (Hashable) import Data.List.NonEmpty (NonEmpty) +import qualified Data.List.NonEmpty as NE import Data.Map.Strict (Map) import Data.Maybe (isJust, mapMaybe) import Data.Proxy @@ -59,6 +60,7 @@ import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsPeerSelection import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils (forgetLedgerTables) import Ouroboros.Consensus.Mempool import qualified Ouroboros.Consensus.MiniProtocol.BlockFetch.ClientInterface as BlockFetchClientInterface import Ouroboros.Consensus.MiniProtocol.ChainSync.Client @@ -81,6 +83,9 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as InvalidBlockPunishment import Ouroboros.Consensus.Storage.ChainDB.Init (InitChainDB) import qualified Ouroboros.Consensus.Storage.ChainDB.Init as InitChainDB +import Ouroboros.Consensus.Storage.LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import Ouroboros.Consensus.Util (whenJust) import Ouroboros.Consensus.Util.AnchoredFragment (preferAnchoredCandidate) import Ouroboros.Consensus.Util.EarlyExit @@ -106,6 +111,7 @@ import Ouroboros.Network.PeerSharing (PeerSharingAPI, PeerSharingRegistry, newPeerSharingAPI, newPeerSharingRegistry, ps_POLICY_PEER_SHARE_MAX_PEERS, ps_POLICY_PEER_SHARE_STICKY_TIME) +import Ouroboros.Network.Protocol.LocalStateQuery.Type (Target (..)) import Ouroboros.Network.SizeInBytes import Ouroboros.Network.TxSubmission.Inbound (TxSubmissionMempoolWriter) @@ -426,208 +432,216 @@ forkBlockForging :: forkBlockForging IS{..} blockForging = forkLinkedWatcher registry threadLabel $ knownSlotWatcher btime - $ withEarlyExit_ . 
go + $ \currentSlot -> withRegistry (\rr -> withEarlyExit_ $ go rr currentSlot) where threadLabel :: String threadLabel = "NodeKernel.blockForging." <> Text.unpack (forgeLabel blockForging) - go :: SlotNo -> WithEarlyExit m () - go currentSlot = do - trace $ TraceStartLeadershipCheck currentSlot + go :: ResourceRegistry m -> SlotNo -> WithEarlyExit m () + go reg currentSlot = do + trace $ TraceStartLeadershipCheck currentSlot - -- Figure out which block to connect to - -- - -- Normally this will be the current block at the tip, but it may - -- be the /previous/ block, if there were multiple slot leaders - BlockContext{bcBlockNo, bcPrevPoint} <- do - eBlkCtx <- lift $ atomically $ - mkCurrentBlockContext currentSlot + -- Figure out which block to connect to + -- + -- Normally this will be the current block at the tip, but it may be the + -- /previous/ block, if there were multiple slot leaders + BlockContext{bcBlockNo, bcPrevPoint} <- do + eBlkCtx <- lift $ atomically $ + mkCurrentBlockContext currentSlot <$> ChainDB.getCurrentChain chainDB - case eBlkCtx of - Right blkCtx -> return blkCtx - Left failure -> do - trace failure - exitEarly + case eBlkCtx of + Right blkCtx -> return blkCtx + Left failure -> do + trace failure + exitEarly - trace $ TraceBlockContext currentSlot bcBlockNo bcPrevPoint + trace $ TraceBlockContext currentSlot bcBlockNo bcPrevPoint - -- Get ledger state corresponding to bcPrevPoint - -- - -- This might fail if, in between choosing 'bcPrevPoint' and this call to - -- 'getPastLedger', we switched to a fork where 'bcPrevPoint' is no longer - -- on our chain. When that happens, we simply give up on the chance to - -- produce a block. - unticked <- do - mExtLedger <- lift $ atomically $ ChainDB.getPastLedger chainDB bcPrevPoint - case mExtLedger of - Just l -> return l - Nothing -> do - trace $ TraceNoLedgerState currentSlot bcPrevPoint - exitEarly - - trace $ TraceLedgerState currentSlot bcPrevPoint - - -- We require the ledger view in order to construct the ticked - -- 'ChainDepState'. - ledgerView <- - case runExcept $ forecastFor - (ledgerViewForecastAt - (configLedger cfg) - (ledgerState unticked)) - currentSlot of - Left err -> do - -- There are so many empty slots between the tip of our chain and - -- the current slot that we cannot get an ledger view anymore - -- In principle, this is no problem; we can still produce a block - -- (we use the ticked ledger state). However, we probably don't - -- /want/ to produce a block in this case; we are most likely - -- missing a blocks on our chain. - trace $ TraceNoLedgerView currentSlot err - exitEarly - Right lv -> - return lv - - trace $ TraceLedgerView currentSlot - - -- Tick the 'ChainDepState' for the 'SlotNo' we're producing a block - -- for. We only need the ticked 'ChainDepState' to check the whether - -- we're a leader. This is much cheaper than ticking the entire - -- 'ExtLedgerState'. 
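For readers new to the forker API used above: the forging loop now acquires a read-only forker at the chosen point, reads the ledger state through it, and has to close it on every exit path. A minimal sketch of that discipline, assuming the API names from the patch (ChainDB.getReadOnlyForkerAtPoint, roforkerClose); the bracketing helper itself is hypothetical:

-- Hypothetical helper, not part of the patch: acquire a read-only forker at a
-- specific point, run an action with it, and always close it afterwards.
-- Returns Nothing if the requested point is no longer on our chain.
withReadOnlyForkerAt ::
     IOLike m
  => ChainDB m blk
  -> ResourceRegistry m
  -> Point blk
  -> (ReadOnlyForker' m blk -> m a)
  -> m (Maybe a)
withReadOnlyForkerAt chainDB rr pt k = do
    eForker <- ChainDB.getReadOnlyForkerAtPoint chainDB rr (SpecificPoint pt)
    case eForker of
      Left _pointNotOnChain -> pure Nothing
      Right forker          -> Just <$> (k forker `finally` roforkerClose forker)

The patch itself does not use such a bracket; it closes the forker explicitly in each early-exit branch before calling 'exitEarly'.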
- let tickedChainDepState :: Ticked (ChainDepState (BlockProtocol blk)) - tickedChainDepState = - tickChainDepState - (configConsensus cfg) - ledgerView - currentSlot - (headerStateChainDep (headerState unticked)) - - -- Check if we are the leader - proof <- do - shouldForge <- lift $ - checkShouldForge blockForging - (contramap (TraceLabelCreds (forgeLabel blockForging)) - (forgeStateInfoTracer tracers)) - cfg + -- Get forker corresponding to bcPrevPoint + -- + -- This might fail if, in between choosing 'bcPrevPoint' and this call to + -- 'ChainDB.getReadOnlyForkerAtPoint', we switched to a fork where 'bcPrevPoint' + -- is no longer on our chain. When that happens, we simply give up on the + -- chance to produce a block. + forkerEith <- lift $ ChainDB.getReadOnlyForkerAtPoint chainDB reg (SpecificPoint bcPrevPoint) + -- Remember to close this forker before exiting! + forker <- case forkerEith of + Left _ -> do + trace $ TraceNoLedgerState currentSlot bcPrevPoint + exitEarly + Right forker -> pure forker + + unticked <- lift $ atomically $ LedgerDB.roforkerGetLedgerState forker + + trace $ TraceLedgerState currentSlot bcPrevPoint + + -- We require the ticked ledger view in order to construct the ticked + -- 'ChainDepState'. + ledgerView <- + case runExcept $ forecastFor + (ledgerViewForecastAt + (configLedger cfg) + (ledgerState unticked)) + currentSlot of + Left err -> do + -- There are so many empty slots between the tip of our chain and the + -- current slot that we cannot get an ledger view anymore In + -- principle, this is no problem; we can still produce a block (we use + -- the ticked ledger state). However, we probably don't /want/ to + -- produce a block in this case; we are most likely missing a blocks + -- on our chain. + trace $ TraceNoLedgerView currentSlot err + lift $ roforkerClose forker + exitEarly + Right lv -> + return lv + + trace $ TraceLedgerView currentSlot + + -- Tick the 'ChainDepState' for the 'SlotNo' we're producing a block for. We + -- only need the ticked 'ChainDepState' to check the whether we're a leader. + -- This is much cheaper than ticking the entire 'ExtLedgerState'. + let tickedChainDepState :: Ticked (ChainDepState (BlockProtocol blk)) + tickedChainDepState = + tickChainDepState + (configConsensus cfg) + ledgerView currentSlot - tickedChainDepState - case shouldForge of - ForgeStateUpdateError err -> do - trace $ TraceForgeStateUpdateError currentSlot err - exitEarly - CannotForge cannotForge -> do - trace $ TraceNodeCannotForge currentSlot cannotForge - exitEarly - NotLeader -> do - trace $ TraceNodeNotLeader currentSlot - exitEarly - ShouldForge p -> return p - - -- At this point we have established that we are indeed slot leader - trace $ TraceNodeIsLeader currentSlot - - -- Tick the ledger state for the 'SlotNo' we're producing a block for - let tickedLedgerState :: Ticked (LedgerState blk) - tickedLedgerState = - applyChainTick - (configLedger cfg) - currentSlot - (ledgerState unticked) - - _ <- evaluate tickedLedgerState - trace $ TraceForgeTickedLedgerState currentSlot bcPrevPoint - - -- Get a snapshot of the mempool that is consistent with the ledger - -- - -- NOTE: It is possible that due to adoption of new blocks the - -- /current/ ledger will have changed. This doesn't matter: we will - -- produce a block that fits onto the ledger we got above; if the - -- ledger in the meantime changes, the block we produce here may or - -- may not be adopted, but it won't be invalid. 
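Note that 'applyChainTick' now produces a ticked state whose tables hold diffs (DiffMK) rather than full values. When complete values are needed, for instance to validate transactions against the whole UTxO set as the ThreadNet test harness further down in this patch does, the values are read through the forker's range query and the diffs are applied on top. A sketch of that step, using the names from the patch (NoPreviousQuery, applyDiffs); the standalone signature is an assumption and its ledger-table class constraints are elided:

-- Sketch: rebuild a ValuesMK ticked state from a DiffMK one by reading the
-- whole UTxO via the forker's range query and overlaying the ticking diffs.
-- (Ledger-table class constraints elided for brevity.)
fullTickedState ::
     Monad m
  => (RangeQueryPrevious (ExtLedgerState blk) -> m (LedgerTables (ExtLedgerState blk) ValuesMK))
  -> Ticked1 (LedgerState blk) DiffMK
  -> m (Ticked1 (LedgerState blk) ValuesMK)
fullTickedState doRangeQuery ticked = do
    fullUTxO <- doRangeQuery NoPreviousQuery  -- read every key in the backing store
    pure $! applyDiffs fullUTxO ticked        -- overlay the diffs produced by ticking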
- (mempoolHash, mempoolSlotNo, mempoolSnapshot) <- lift $ atomically $ do - (mempoolHash, mempoolSlotNo) <- do - snap <- getSnapshot mempool -- only used for its tip-like information - let h :: ChainHash blk - h = castHash $ getTipHash $ snapshotLedgerState snap - pure (h, snapshotSlotNo snap) - - snap <- getSnapshotFor - mempool - (ForgeInKnownSlot currentSlot tickedLedgerState) - pure (mempoolHash, mempoolSlotNo, snap) - - let txs = - snapshotTake mempoolSnapshot - $ blockCapacityTxMeasure (configLedger cfg) tickedLedgerState - -- NB respect the capacity of the ledger state we're extending, - -- which is /not/ 'snapshotLedgerState' - - -- force the mempool's computation before the tracer event - _ <- evaluate (length txs) - _ <- evaluate (snapshotLedgerState mempoolSnapshot) - trace $ TraceForgingMempoolSnapshot currentSlot bcPrevPoint mempoolHash mempoolSlotNo - - -- Actually produce the block - newBlock <- lift $ - Block.forgeBlock blockForging + (headerStateChainDep (headerState unticked)) + + -- Check if we are the leader + proof <- do + shouldForge <- lift $ + checkShouldForge + blockForging + (contramap (TraceLabelCreds (forgeLabel blockForging)) + (forgeStateInfoTracer tracers)) cfg - bcBlockNo currentSlot - tickedLedgerState - txs - proof + tickedChainDepState + case shouldForge of + ForgeStateUpdateError err -> do + trace $ TraceForgeStateUpdateError currentSlot err + lift $ roforkerClose forker + exitEarly + CannotForge cannotForge -> do + trace $ TraceNodeCannotForge currentSlot cannotForge + lift $ roforkerClose forker + exitEarly + NotLeader -> do + trace $ TraceNodeNotLeader currentSlot + lift $ roforkerClose forker + exitEarly + ShouldForge p -> return p - trace $ TraceForgedBlock + -- At this point we have established that we are indeed slot leader + trace $ TraceNodeIsLeader currentSlot + + -- Tick the ledger state for the 'SlotNo' we're producing a block for + let tickedLedgerState :: Ticked1 (LedgerState blk) DiffMK + tickedLedgerState = + applyChainTick + (configLedger cfg) currentSlot - (ledgerTipPoint (ledgerState unticked)) - newBlock - (snapshotMempoolSize mempoolSnapshot) - - -- Add the block to the chain DB - let noPunish = InvalidBlockPunishment.noPunishment -- no way to punish yourself - -- Make sure that if an async exception is thrown while a block is - -- added to the chain db, we will remove txs from the mempool. - - -- 'addBlockAsync' is a non-blocking action, so `mask_` would suffice, - -- but the finalizer is a blocking operation, hence we need to use - -- 'uninterruptibleMask_' to make sure that async exceptions do not - -- interrupt it. - uninterruptibleMask_ $ do - result <- lift $ ChainDB.addBlockAsync chainDB noPunish newBlock - -- Block until we have processed the block - mbCurTip <- lift $ atomically $ ChainDB.blockProcessed result - - -- Check whether we adopted our block - when (mbCurTip /= SuccesfullyAddedBlock (blockPoint newBlock)) $ do - isInvalid <- lift $ atomically $ - ($ blockHash newBlock) . forgetFingerprint <$> - ChainDB.getIsInvalidBlock chainDB - case isInvalid of - Nothing -> - trace $ TraceDidntAdoptBlock currentSlot newBlock - Just reason -> do - trace $ TraceForgedInvalidBlock currentSlot newBlock reason - -- We just produced a block that is invalid according to the - -- ledger in the ChainDB, while the mempool said it is valid. - -- There is an inconsistency between the two! - -- - -- Remove all the transactions in that block, otherwise we'll - -- run the risk of forging the same invalid block again. 
This - -- means that we'll throw away some good transactions in the - -- process. - lift $ removeTxs mempool (map (txId . txForgetValidated) txs) - exitEarly + (ledgerState unticked) + + _ <- evaluate tickedLedgerState + trace $ TraceForgeTickedLedgerState currentSlot bcPrevPoint + + -- Get a snapshot of the mempool that is consistent with the ledger + -- + -- NOTE: It is possible that due to adoption of new blocks the + -- /current/ ledger will have changed. This doesn't matter: we will + -- produce a block that fits onto the ledger we got above; if the + -- ledger in the meantime changes, the block we produce here may or + -- may not be adopted, but it won't be invalid. + (mempoolHash, mempoolSlotNo) <- lift $ atomically $ do + snap <- getSnapshot mempool -- only used for its tip-like information + let h :: ChainHash blk + h = castHash $ getTipHash $ snapshotState snap + pure (h, snapshotSlotNo snap) + + let readTables = fmap castLedgerTables . roforkerReadTables forker . castLedgerTables + + mempoolSnapshot <- lift $ getSnapshotFor + mempool + currentSlot + tickedLedgerState + readTables + + lift $ roforkerClose forker + + let txs = [ tx | (tx, _, _) <- snapshotTxs mempoolSnapshot ] + + -- force the mempool's computation before the tracer event + _ <- evaluate (length txs) + _ <- evaluate mempoolHash + + trace $ TraceForgingMempoolSnapshot currentSlot bcPrevPoint mempoolHash mempoolSlotNo + + -- Actually produce the block + newBlock <- lift $ Block.forgeBlock + blockForging + cfg + bcBlockNo + currentSlot + (forgetLedgerTables tickedLedgerState) + txs + proof + + trace $ TraceForgedBlock + currentSlot + (ledgerTipPoint (ledgerState unticked)) + newBlock + (snapshotMempoolSize mempoolSnapshot) + + -- Add the block to the chain DB + let noPunish = InvalidBlockPunishment.noPunishment -- no way to punish yourself + -- Make sure that if an async exception is thrown while a block is + -- added to the chain db, we will remove txs from the mempool. + + -- 'addBlockAsync' is a non-blocking action, so `mask_` would suffice, + -- but the finalizer is a blocking operation, hence we need to use + -- 'uninterruptibleMask_' to make sure that async exceptions do not + -- interrupt it. + uninterruptibleMask_ $ do + result <- lift $ ChainDB.addBlockAsync chainDB noPunish newBlock + -- Block until we have processed the block + mbCurTip <- lift $ atomically $ ChainDB.blockProcessed result + + -- Check whether we adopted our block + when (mbCurTip /= SuccesfullyAddedBlock (blockPoint newBlock)) $ do + isInvalid <- lift $ atomically $ + ($ blockHash newBlock) . forgetFingerprint <$> + ChainDB.getIsInvalidBlock chainDB + case isInvalid of + Nothing -> + trace $ TraceDidntAdoptBlock currentSlot newBlock + Just reason -> do + trace $ TraceForgedInvalidBlock currentSlot newBlock reason + -- We just produced a block that is invalid according to the + -- ledger in the ChainDB, while the mempool said it is valid. + -- There is an inconsistency between the two! + -- + -- Remove all the transactions in that block, otherwise we'll + -- run the risk of forging the same invalid block again. This + -- means that we'll throw away some good transactions in the + -- process. + whenJust + (NE.nonEmpty (map (txId . txForgetValidated) txs)) + (lift . removeTxs mempool) + exitEarly -- We successfully produced /and/ adopted a block -- -- NOTE: we are tracing the transactions we retrieved from the Mempool, - -- not the transactions actually /in the block/. 
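The mempool snapshot above is now taken against the ticked state plus an explicit callback for reading UTxO entries on demand ('readTables'), instead of against a fully materialised ledger state. Pulled out as a named function, that adapter would look roughly as follows; the name, the Functor constraint and the KeysMK/ValuesMK shape are assumptions, and the casting constraints are elided:

-- Sketch of the 'readTables' adapter handed to 'getSnapshotFor' above: read
-- the requested keys through the read-only forker, casting between tables
-- keyed by the block's ledger state and by the extended ledger state.
readTablesVia ::
     Functor m
  => ReadOnlyForker' m blk
  -> LedgerTables (LedgerState blk) KeysMK
  -> m (LedgerTables (LedgerState blk) ValuesMK)
readTablesVia forker =
    fmap castLedgerTables . roforkerReadTables forker . castLedgerTables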
- -- The transactions in the block should be a prefix of the transactions - -- in the mempool. If this is not the case, this is a bug. - -- Unfortunately, we can't + -- not the transactions actually /in the block/. They should always + -- match, if they don't, that would be a bug. Unfortunately, we can't -- assert this here because the ability to extract transactions from a -- block, i.e., the @HasTxs@ class, is not implementable by all blocks, -- e.g., @DualBlock@. + trace $ TraceAdoptedBlock currentSlot newBlock txs trace :: TraceForgeEvent blk -> WithEarlyExit m () @@ -797,7 +811,7 @@ getMempoolWriter mempool = Inbound.TxSubmissionMempoolWriter getPeersFromCurrentLedger :: (IOLike m, LedgerSupportsPeerSelection blk) => NodeKernel m addrNTN addrNTC blk - -> (LedgerState blk -> Bool) + -> (LedgerState blk EmptyMK -> Bool) -> STM m (Maybe [(PoolStake, NonEmpty RelayAccessPoint)]) getPeersFromCurrentLedger kernel p = do immutableLedger <- @@ -823,7 +837,7 @@ getPeersFromCurrentLedgerAfterSlot :: getPeersFromCurrentLedgerAfterSlot kernel slotNo = getPeersFromCurrentLedger kernel afterSlotNo where - afterSlotNo :: LedgerState blk -> Bool + afterSlotNo :: LedgerState blk mk -> Bool afterSlotNo st = case ledgerTipSlot st of Origin -> False diff --git a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/General.hs b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/General.hs index 95008ef9ef..b050badad7 100644 --- a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/General.hs +++ b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/General.hs @@ -108,7 +108,7 @@ truncateNodeJoinPlan -- scale by t' / t Map.map (\(SlotNo i) -> SlotNo $ (i * t') `div` t) $ -- discard discarded nodes - Map.filterWithKey (\(CoreNodeId nid) _ -> nid < n') $ + Map.filterWithKey (\(CoreNodeId nid) _ -> nid < n') m truncateNodeTopology :: NodeTopology -> NumCoreNodes -> NodeTopology @@ -850,7 +850,9 @@ prop_general_internal syncity pga testOutput = -- Check that all self-issued blocks are pipelined. 
prop_pipelining :: Property - prop_pipelining = conjoin + prop_pipelining = case syncity of + SemiSync -> property True + Sync -> conjoin [ counterexample ("Node " <> condense nid <> " did not pipeline") $ counterexample ("some of its blocks forged as the sole slot leader:") $ counterexample (condense forgedButNotPipelined) $ diff --git a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs index eb58519102..53faaecda9 100644 --- a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs +++ b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/Network.hs @@ -41,6 +41,7 @@ import qualified Control.Concurrent.Class.MonadSTM as MonadSTM import Control.Concurrent.Class.MonadSTM.Strict (newTMVar) import qualified Control.Exception as Exn import Control.Monad +import Control.Monad.Base (MonadBase) import Control.Monad.Class.MonadTime.SI (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import qualified Control.Monad.Except as Exc @@ -70,6 +71,7 @@ import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mempool import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.HistoricityCheck as HistoricityCheck @@ -88,10 +90,11 @@ import Ouroboros.Consensus.NodeKernel as NodeKernel import Ouroboros.Consensus.Protocol.Abstract import qualified Ouroboros.Consensus.Storage.ChainDB as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as InvalidBlockPunishment -import Ouroboros.Consensus.Storage.ChainDB.Impl import Ouroboros.Consensus.Storage.ChainDB.Impl.Args -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LedgerDB +import Ouroboros.Consensus.Storage.ChainDB.Impl.Types import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB +import Ouroboros.Consensus.Storage.LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args as LedgerDB import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util.Assert import Ouroboros.Consensus.Util.Condense @@ -119,6 +122,7 @@ import Ouroboros.Network.Point (WithOrigin (..)) import qualified Ouroboros.Network.Protocol.ChainSync.Type as CS import Ouroboros.Network.Protocol.KeepAlive.Type import Ouroboros.Network.Protocol.Limits (waitForever) +import Ouroboros.Network.Protocol.LocalStateQuery.Type import Ouroboros.Network.Protocol.PeerSharing.Type (PeerSharing) import Ouroboros.Network.Protocol.TxSubmission2.Type import qualified System.FS.Sim.MockFS as Mock @@ -248,7 +252,7 @@ data ThreadNetworkArgs m blk = ThreadNetworkArgs -- context. -- data VertexStatus m blk - = VDown (Chain blk) (LedgerState blk) + = VDown (Chain blk) (LedgerState blk EmptyMK) -- ^ The vertex does not currently have a node instance; its previous -- instance stopped with this chain and ledger state (empty/initial before -- first instance) @@ -298,6 +302,7 @@ runThreadNetwork :: forall m blk. 
, TxGen blk , TracingConstraints blk , HasCallStack + , MonadBase m m ) => SystemTime m -> ThreadNetworkArgs m blk -> m (TestOutput blk) runThreadNetwork systemTime ThreadNetworkArgs @@ -352,7 +357,10 @@ runThreadNetwork systemTime ThreadNetworkArgs TestNodeInitialization{tniProtocolInfo} = nodeInitData ProtocolInfo{pInfoInitLedger} = tniProtocolInfo ExtLedgerState{ledgerState} = pInfoInitLedger - v <- uncheckedNewTVarM (VDown Genesis ledgerState) + v <- + uncheckedNewTVarM + $ VDown Genesis + $ forgetLedgerTables ledgerState pure (nid, v) -- fork the directed edges, which also allocates their status variables @@ -552,6 +560,7 @@ runThreadNetwork systemTime ThreadNetworkArgs ChainDB.getCurrentLedger chainDB finalChain <- ChainDB.toChain chainDB + pure (again, finalChain, ledgerState) -- end of the node's withRegistry @@ -604,36 +613,57 @@ runThreadNetwork systemTime ThreadNetworkArgs -> ResourceRegistry m -> (SlotNo -> STM m ()) -> LedgerConfig blk - -> STM m (LedgerState blk) + -> STM m (Point blk) + -> (ResourceRegistry m -> m (ReadOnlyForker' m blk)) -> Mempool m blk -> [GenTx blk] -- ^ valid transactions the node should immediately propagate -> m () - forkCrucialTxs clock s0 registry unblockForge lcfg getLdgr mempool txs0 = - void $ forkLinkedThread registry "crucialTxs" $ do - let wouldBeValid slot st tx = - isRight $ Exc.runExcept $ applyTx lcfg DoNotIntervene slot tx st - - checkSt slot snap = - any (wouldBeValid slot (snapshotLedgerState snap)) txs0 - - let loop (slot, ledger, mempFp) = do - (snap1, snap2) <- atomically $ do - snap1 <- getSnapshotFor mempool $ - -- This node would include these crucial txs if it leads in - -- this slot. - ForgeInKnownSlot slot $ applyChainTick lcfg slot ledger - snap2 <- getSnapshotFor mempool $ - -- Other nodes might include these crucial txs when leading - -- in the next slot. - ForgeInKnownSlot (succ slot) $ applyChainTick lcfg (succ slot) ledger - -- This loop will repeat for the next slot, so we only need to - -- check for this one and the next. - pure (snap1, snap2) + forkCrucialTxs clock s0 registry unblockForge lcfg getTipPoint mforker mempool txs0 = do + void $ forkLinkedThread registry "crucialTxs" $ withRegistry $ \reg -> do + let + wouldBeValid :: SlotNo + -> (RangeQueryPrevious (ExtLedgerState blk) -> m (LedgerTables (ExtLedgerState blk) ValuesMK)) + -> Ticked1 (LedgerState blk) DiffMK + -> GenTx blk + -> m Bool + wouldBeValid slot doRangeQuery st tx = do + (fullLedgerSt :: Ticked1 (LedgerState blk) ValuesMK) <- do + -- FIXME: we know that the range query implemetation will add at + -- most 1 to the number of requested keys, hence the + -- subtraction. When we revisit the range query implementation + -- we should remove this workaround. + fullUTxO <- doRangeQuery NoPreviousQuery + pure $! applyDiffs fullUTxO st + pure $ isRight $ Exc.runExcept $ applyTx lcfg DoNotIntervene slot tx fullLedgerSt + + + checkSt slot doRangeQuery snap = + or <$> mapM (wouldBeValid slot doRangeQuery (snapshotState snap)) txs0 + + let loop (slot, mempFp) = do + forker <- mforker reg + extLedger <- atomically $ roforkerGetLedgerState forker + let ledger = ledgerState extLedger + doRangeQuery = roforkerRangeReadTables forker + -- This node would include these crucial txs if it leads in + -- this slot. + let ledger' = applyChainTick lcfg slot ledger + readTables = fmap castLedgerTables . roforkerReadTables forker . 
castLedgerTables + snap1 <- getSnapshotFor mempool slot ledger' readTables + -- Other nodes might include these crucial txs when leading + -- in the next slot. + let ledger'' = applyChainTick lcfg (succ slot) ledger + snap2 <- getSnapshotFor mempool (succ slot) ledger'' readTables + -- Don't attempt to add them if we're sure they'll be invalid. -- That just risks blocking on a full mempool unnecessarily. - when (checkSt slot snap1 || checkSt (succ slot) snap2) $ do + b1 <- checkSt slot doRangeQuery snap1 + b2 <- checkSt (succ slot) doRangeQuery snap2 + roforkerClose forker + + when (b1 || b2) $ do _ <- addTxs mempool txs0 pure () @@ -645,7 +675,7 @@ runThreadNetwork systemTime ThreadNetworkArgs slotChanged = do let slot' = succ slot _ <- OracularClock.blockUntilSlot clock slot' - pure (slot', ledger, mempFp) + pure (slot', mempFp) -- a new tx (e.g. added by TxSubmission) might render a crucial -- transaction valid @@ -653,12 +683,12 @@ runThreadNetwork systemTime ThreadNetworkArgs let prjTno (_a, b, _c) = b :: TicketNo getMemp = (map prjTno . snapshotTxs) <$> getSnapshot mempool (mempFp', _) <- atomically $ blockUntilChanged id mempFp getMemp - pure (slot, ledger, mempFp') + pure (slot, mempFp') -- a new ledger state might render a crucial transaction valid ldgrChanged = do - (ledger', _) <- atomically $ blockUntilChanged ledgerTipPoint (ledgerTipPoint ledger) getLdgr - pure (slot, ledger', mempFp) + _ <- atomically $ blockUntilChanged id (ledgerTipPoint ledger) getTipPoint + pure (slot, mempFp) -- wake up when any of those change -- @@ -672,8 +702,7 @@ runThreadNetwork systemTime ThreadNetworkArgs void $ syncWithLedger mempool loop fps' - ledger0 <- atomically $ getLdgr - loop (s0, ledger0, []) + loop (s0, []) -- | Produce transactions every time the slot changes and submit them to -- the mempool. @@ -683,24 +712,34 @@ runThreadNetwork systemTime ThreadNetworkArgs -> OracularClock m -> TopLevelConfig blk -> Seed - -> STM m (ExtLedgerState blk) + -> (ResourceRegistry m -> m (ReadOnlyForker' m blk)) -- ^ How to get the current ledger state -> Mempool m blk -> m () - forkTxProducer coreNodeId registry clock cfg nodeSeed getExtLedger mempool = - void $ OracularClock.forkEachSlot registry clock "txProducer" $ \curSlotNo -> do - ledger <- atomically $ ledgerState <$> getExtLedger - -- Combine the node's seed with the current slot number, to make sure - -- we generate different transactions in each slot. - let txs = runGen - (nodeSeed `combineWith` unSlotNo curSlotNo) - (testGenTxs coreNodeId numCoreNodes curSlotNo cfg txGenExtra ledger) - - void $ addTxs mempool txs + forkTxProducer coreNodeId registry clock cfg nodeSeed mforker mempool = + void $ OracularClock.forkEachSlot registry clock "txProducer" $ \curSlotNo -> withRegistry $ \reg -> do + forker <- mforker reg + emptySt' <- atomically $ roforkerGetLedgerState forker + let emptySt = emptySt' + doRangeQuery = roforkerRangeReadTables forker + fullLedgerSt <- fmap ledgerState $ do + -- FIXME: we know that the range query implemetation will add at + -- most 1 to the number of requested keys, hence the + -- subtraction. When we revisit the range query implementation + -- we should remove this workaround. + fullUTxO <- doRangeQuery NoPreviousQuery + pure $! withLedgerTables emptySt fullUTxO + roforkerClose forker + -- Combine the node's seed with the current slot number, to make sure + -- we generate different transactions in each slot. 
+ let txs = runGen + (nodeSeed `combineWith` unSlotNo curSlotNo) + (testGenTxs coreNodeId numCoreNodes curSlotNo cfg txGenExtra fullLedgerSt) + void $ addTxs mempool txs mkArgs :: ResourceRegistry m -> TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk ValuesMK -> Tracer m (RealPoint blk, ExtValidationError blk) -- ^ invalid block tracer -> Tracer m (RealPoint blk, BlockNo) @@ -735,7 +774,7 @@ runThreadNetwork systemTime ThreadNetworkArgs , VolatileDB.volTracer = TraceVolatileDBEvent >$< tr } , cdbLgrDbArgs = (cdbLgrDbArgs args) { - LedgerDB.lgrTracer = TraceSnapshotEvent >$< tr + LedgerDB.lgrTracer = TraceLedgerDBEvent >$< tr } , cdbsArgs = (cdbsArgs args) { -- TODO: Vary cdbsGcDelay, cdbsGcInterval, cdbsBlockToAddSize @@ -838,7 +877,7 @@ runThreadNetwork systemTime ThreadNetworkArgs -> TopLevelConfig blk -> BlockNo -> SlotNo - -> TickedLedgerState blk + -> TickedLedgerState blk mk -> [Validated (GenTx blk)] -> IsLeader (BlockProtocol blk) -> m blk @@ -866,7 +905,7 @@ runThreadNetwork systemTime ThreadNetworkArgs cfg' currentBno currentSlot - tickedLdgSt + (forgetLedgerTables tickedLdgSt) txs prf Just forgeEbbEnv -> do @@ -887,13 +926,14 @@ runThreadNetwork systemTime ThreadNetworkArgs -- fail if the EBB is invalid -- if it is valid, we retick to the /same/ slot - let apply = applyLedgerBlock (configLedger pInfoConfig) - tickedLdgSt' <- case Exc.runExcept $ apply ebb tickedLdgSt of + let apply = applyLedgerBlock (configLedger pInfoConfig) + tables = emptyLedgerTables -- EBBs need no input tables + tickedLdgSt' <- case Exc.runExcept $ apply ebb (tickedLdgSt `withLedgerTables` tables) of Left e -> Exn.throw $ JitEbbError @blk e Right st -> pure $ applyChainTick (configLedger pInfoConfig) currentSlot - st + (forgetLedgerTables st) -- forge the block usings the ledger state that includes -- the EBB @@ -902,7 +942,7 @@ runThreadNetwork systemTime ThreadNetworkArgs cfg' currentBno currentSlot - tickedLdgSt' + (forgetLedgerTables tickedLdgSt') txs prf @@ -1069,6 +1109,14 @@ runThreadNetwork systemTime ThreadNetworkArgs -- tests about the peer sharing protocol itself. (NTN.mkHandlers nodeKernelArgs nodeKernel) + -- Create a 'ReadOnlyForker' to be used in 'forkTxProducer'. This function + -- needs the read-only forker to elaborate a complete UTxO set to generate + -- transactions. + let getForker rr = do + ChainDB.getReadOnlyForkerAtPoint chainDB rr VolatileTip >>= \case + Left e -> error $ show e + Right l -> pure l + -- In practice, a robust wallet/user can persistently add a transaction -- until it appears on the chain. This thread adds robustness for the -- @txs0@ argument, which in practice contains delegation certificates @@ -1091,7 +1139,8 @@ runThreadNetwork systemTime ThreadNetworkArgs registry unblockForge (configLedger pInfoConfig) - (ledgerState <$> ChainDB.getCurrentLedger chainDB) + (ledgerTipPoint . ledgerState <$> ChainDB.getCurrentLedger chainDB) + getForker mempool txs0 @@ -1105,7 +1154,7 @@ runThreadNetwork systemTime ThreadNetworkArgs (seed `combineWith` unCoreNodeId coreNodeId) -- Uses the same varRNG as the block producer, but we split the RNG -- each time, so this is fine. 
- (ChainDB.getCurrentLedger chainDB) + getForker mempool return (nodeKernel, LimitedApp app) @@ -1509,7 +1558,7 @@ newNodeInfo = do ) pure - ( NodeInfo{nodeInfoEvents, nodeInfoDBs} + ( NodeInfo{nodeInfoEvents, nodeInfoDBs } , NodeInfo <$> readEvents <*> atomically readDBs ) @@ -1521,7 +1570,7 @@ data NodeOutput blk = NodeOutput { nodeOutputAdds :: Map SlotNo (Set (RealPoint blk, BlockNo)) , nodeOutputCannotForges :: Map SlotNo [CannotForge blk] , nodeOutputFinalChain :: Chain blk - , nodeOutputFinalLedger :: LedgerState blk + , nodeOutputFinalLedger :: LedgerState blk EmptyMK , nodeOutputForges :: Map SlotNo blk , nodeOutputHeaderAdds :: Map SlotNo [(RealPoint blk, BlockNo)] , nodeOutputInvalids :: Map (RealPoint blk) [ExtValidationError blk] @@ -1542,7 +1591,7 @@ mkTestOutput :: => [( CoreNodeId , m (NodeInfo blk MockFS []) , Chain blk - , LedgerState blk + , LedgerState blk EmptyMK )] -> m (TestOutput blk) mkTestOutput vertexInfos = do diff --git a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/TxGen.hs b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/TxGen.hs index 5051c5bbc3..3f53d70267 100644 --- a/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/TxGen.hs +++ b/ouroboros-consensus-diffusion/src/unstable-diffusion-testlib/Test/ThreadNet/TxGen.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} @@ -15,12 +16,14 @@ module Test.ThreadNet.TxGen ( import Data.Kind (Type) import Data.SOP.BasicFunctors import Data.SOP.Constraint +import Data.SOP.Functors (Flip (..)) import Data.SOP.Index import Data.SOP.Strict import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Combinator import qualified Ouroboros.Consensus.HardFork.Combinator.State as State +import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..)) import Ouroboros.Consensus.NodeId (CoreNodeId) import Test.QuickCheck (Gen) @@ -47,7 +50,7 @@ class TxGen blk where -> SlotNo -> TopLevelConfig blk -> TxGenExtra blk - -> LedgerState blk + -> LedgerState blk ValuesMK -> Gen [GenTx blk] {------------------------------------------------------------------------------- @@ -76,7 +79,7 @@ testGenTxsHfc :: -> SlotNo -> TopLevelConfig (HardForkBlock xs) -> NP WrapTxGenExtra xs - -> LedgerState (HardForkBlock xs) + -> LedgerState (HardForkBlock xs) ValuesMK -> Gen [GenTx (HardForkBlock xs)] testGenTxsHfc coreNodeId numCoreNodes curSlotNo cfg extras state = hcollapse $ @@ -97,8 +100,8 @@ testGenTxsHfc coreNodeId numCoreNodes curSlotNo cfg extras state = => Index xs blk -> TopLevelConfig blk -> WrapTxGenExtra blk - -> LedgerState blk + -> Flip LedgerState ValuesMK blk -> K (Gen [GenTx (HardForkBlock xs)]) blk - aux index cfg' (WrapTxGenExtra extra') state' = K $ + aux index cfg' (WrapTxGenExtra extra') (Flip state') = K $ fmap (injectNS' (Proxy @GenTx) index) <$> testGenTxs coreNodeId numCoreNodes curSlotNo cfg' extra' state' diff --git a/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/Consensus/Ledger/Mock/Generators.hs b/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/Consensus/Ledger/Mock/Generators.hs index 72ab653d1b..f96afc3168 100644 --- a/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/Consensus/Ledger/Mock/Generators.hs +++ b/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/Consensus/Ledger/Mock/Generators.hs @@ -3,6 
+3,7 @@ {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -18,7 +19,10 @@ import qualified Data.Set as Set import Data.Typeable import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mock.Ledger.Block import Ouroboros.Consensus.Mock.Ledger.Block.BFT import qualified Ouroboros.Consensus.Mock.Ledger.State as L @@ -99,14 +103,24 @@ instance Arbitrary SimpleBody where instance Arbitrary (SomeSecond (NestedCtxt Header) (SimpleBlock c ext)) where arbitrary = return $ SomeSecond indexIsTrivial -instance Arbitrary (SomeSecond BlockQuery (SimpleBlock c ext)) where - arbitrary = return $ SomeSecond QueryLedgerTip +instance Arbitrary (SomeBlockQuery (BlockQuery (SimpleBlock c ext))) where + arbitrary = return $ SomeBlockQuery QueryLedgerTip instance (SimpleCrypto c, Typeable ext) => Arbitrary (SomeResult (SimpleBlock c ext)) where arbitrary = SomeResult QueryLedgerTip <$> arbitrary -instance Arbitrary (LedgerState (SimpleBlock c ext)) where - arbitrary = SimpleLedgerState <$> arbitrary +instance (SimpleCrypto c, Typeable ext) + => Arbitrary (LedgerState (SimpleBlock c ext) EmptyMK) where + arbitrary = + forgetLedgerTables + <$> arbitrary @(LedgerState (SimpleBlock c ext) ValuesMK) + +instance (SimpleCrypto c, Typeable ext) + => Arbitrary (LedgerState (SimpleBlock c ext) ValuesMK) where + arbitrary = + unstowLedgerTables + . flip SimpleLedgerState emptyLedgerTables + <$> arbitrary instance HashAlgorithm (SimpleHash c) => Arbitrary (AnnTip (SimpleBlock c ext)) where arbitrary = do diff --git a/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/ThreadNet/TxGen/Mock.hs b/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/ThreadNet/TxGen/Mock.hs index 070b792280..5c2b4905ae 100644 --- a/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/ThreadNet/TxGen/Mock.hs +++ b/ouroboros-consensus-diffusion/src/unstable-mock-testlib/Test/ThreadNet/TxGen/Mock.hs @@ -10,6 +10,7 @@ import Control.Monad (replicateM) import qualified Data.Map.Strict as Map import qualified Data.Set as Set import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Mock.Ledger import Test.QuickCheck hiding (elements) import Test.ThreadNet.TxGen @@ -31,7 +32,7 @@ instance TxGen (SimpleBlock SimpleMockCrypto ext) where addrs = Map.keys $ mkAddrDist numCoreNodes utxo :: Utxo - utxo = mockUtxo $ simpleLedgerState ledgerState + utxo = mockUtxo $ simpleLedgerState $ stowLedgerTables ledgerState genSimpleTx :: SlotNo -> [Addr] -> Utxo -> Gen Tx genSimpleTx curSlotNo addrs u = do diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator.hs index c59f9b27ba..509168cf1d 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator.hs @@ -2,13 +2,16 @@ {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances 
#-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE OverloadedStrings #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} @@ -16,15 +19,20 @@ module Test.Consensus.HardFork.Combinator (tests) where +import Cardano.Binary (FromCBOR (fromCBOR), ToCBOR (toCBOR)) import qualified Data.Map.Strict as Map import Data.SOP.Counting +import Data.SOP.Functors (Flip (..)) +import Data.SOP.Index (Index (..)) import Data.SOP.InPairs (RequiringBoth (..)) import qualified Data.SOP.InPairs as InPairs import Data.SOP.OptNP (OptNP (..)) import Data.SOP.Strict import qualified Data.SOP.Tails as Tails +import Data.Void (Void, absurd) import Data.Word import GHC.Generics (Generic) +import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block import Ouroboros.Consensus.BlockchainTime import Ouroboros.Consensus.Config @@ -37,6 +45,7 @@ import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.ProtocolInfo @@ -222,7 +231,7 @@ prop_simple_hfc_convergence testSetup@TestSetup{..} = , pInfoInitLedger = ExtLedgerState { ledgerState = HardForkLedgerState $ initHardForkState - initLedgerState + (Flip initLedgerState) , headerState = genesisHeaderState $ initHardForkState (WrapChainDepState initChainDepState) @@ -237,7 +246,7 @@ prop_simple_hfc_convergence testSetup@TestSetup{..} = $ OptNil ] - initLedgerState :: LedgerState BlockA + initLedgerState :: LedgerState BlockA ValuesMK initLedgerState = LgrA { lgrA_tip = GenesisPoint , lgrA_transition = Nothing @@ -356,6 +365,36 @@ prop_simple_hfc_convergence testSetup@TestSetup{..} = instance TxGen TestBlock where testGenTxs _ _ _ _ _ _ = return [] +{------------------------------------------------------------------------------- + Canonical TxIn +-------------------------------------------------------------------------------} + +instance HasCanonicalTxIn '[BlockA, BlockB] where + newtype instance CanonicalTxIn '[BlockA, BlockB] = BlockABTxIn { + getBlockABTxIn :: Void + } + deriving stock (Show, Eq, Ord) + deriving newtype (NoThunks, FromCBOR, ToCBOR) + + injectCanonicalTxIn IZ key = absurd key + injectCanonicalTxIn (IS IZ) key = absurd key + injectCanonicalTxIn (IS (IS idx')) _ = case idx' of {} + + distribCanonicalTxIn _ key = absurd $ getBlockABTxIn key + + encodeCanonicalTxIn = toCBOR + + decodeCanonicalTxIn = fromCBOR + +instance HasHardForkTxOut '[BlockA, BlockB] where + type HardForkTxOut '[BlockA, BlockB] = DefaultHardForkTxOut '[BlockA, BlockB] + injectHardForkTxOut = injectHardForkTxOutDefault + distribHardForkTxOut = distribHardForkTxOutDefault + +instance SerializeHardForkTxOut '[BlockA, BlockB] where + encodeHardForkTxOut _ = encodeHardForkTxOutDefault + decodeHardForkTxOut _ = decodeHardForkTxOutDefault + {------------------------------------------------------------------------------- Hard fork -------------------------------------------------------------------------------} @@ -367,6 +406,7 @@ instance CanHardFork '[BlockA, BlockB] where hardForkEraTranslation = EraTranslation { translateLedgerState = PCons ledgerState_AtoB PNil + , translateLedgerTables = 
PCons ledgerTables_AtoB PNil , translateChainDepState = PCons chainDepState_AtoB PNil , crossEraForecast = PCons forecast_AtoB PNil } @@ -411,11 +451,22 @@ instance SerialiseHFC '[BlockA, BlockB] ledgerState_AtoB :: RequiringBoth WrapLedgerConfig - (Translate LedgerState) + TranslateLedgerState BlockA BlockB -ledgerState_AtoB = InPairs.ignoringBoth $ Translate $ \_ LgrA{..} -> LgrB { - lgrB_tip = castPoint lgrA_tip +ledgerState_AtoB = + InPairs.ignoringBoth + $ TranslateLedgerState { + translateLedgerStateWith = \_ LgrA{..} -> + LgrB { + lgrB_tip = castPoint lgrA_tip + } + } + +ledgerTables_AtoB :: TranslateLedgerTables BlockA BlockB +ledgerTables_AtoB = TranslateLedgerTables { + translateTxInWith = id + , translateTxOutWith = id } chainDepState_AtoB :: @@ -444,3 +495,20 @@ injectTx_AtoB :: BlockB injectTx_AtoB = InPairs.ignoringBoth $ Pair2 cannotInjectTx cannotInjectValidatedTx + +{------------------------------------------------------------------------------- + Query HF +-------------------------------------------------------------------------------} + +instance BlockSupportsHFLedgerQuery '[BlockA, BlockB] where + answerBlockQueryHFLookup IZ _cfg (q :: BlockQuery BlockA QFLookupTables result) = case q of {} + answerBlockQueryHFLookup (IS IZ) _cfg (q :: BlockQuery BlockB QFLookupTables result) = case q of {} + answerBlockQueryHFLookup (IS (IS idx)) _cfg _q = case idx of {} + + answerBlockQueryHFTraverse IZ _cfg (q :: BlockQuery BlockA QFTraverseTables result) = case q of {} + answerBlockQueryHFTraverse (IS IZ) _cfg (q :: BlockQuery BlockB QFTraverseTables result) = case q of {} + answerBlockQueryHFTraverse (IS (IS idx)) _cfg _q = case idx of {} + + queryLedgerGetTraversingFilter IZ (q :: BlockQuery BlockA QFTraverseTables result) = case q of {} + queryLedgerGetTraversingFilter (IS IZ) (q :: BlockQuery BlockB QFTraverseTables result) = case q of {} + queryLedgerGetTraversingFilter (IS (IS idx)) _q = case idx of {} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/A.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/A.hs index 4aa3b65074..be3cc4e6a6 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/A.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/A.hs @@ -33,6 +33,7 @@ module Test.Consensus.HardFork.Combinator.A ( , GenTx (..) , Header (..) , LedgerState (..) + , LedgerTables (..) , NestedCtxt_ (..) , StorageConfig (..) , TxId (..) 
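Since neither test block keeps any ledger tables, the canonical hard-fork TxIn chosen for '[BlockA, BlockB]' in the 'HasCanonicalTxIn' instance above is 'Void': no key can ever be injected or distributed, which is why every method is discharged with 'absurd' or an empty case. A minimal sketch of what that means for a caller, assuming the method takes the era index followed by that era's table key ('injectIsVacuous' is a hypothetical name used only for this illustration, not part of the patch):

-- Hypothetical helper: 'Key (LedgerState BlockA)' is 'Void' (see the instances
-- added to Combinator/A.hs below), so this function can never be applied to a
-- real value.
injectIsVacuous :: Key (LedgerState BlockA) -> CanonicalTxIn '[BlockA, BlockB]
injectIsVacuous = injectCanonicalTxIn (IZ :: Index '[BlockA, BlockB] BlockA)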
@@ -74,6 +75,7 @@ import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsPeerSelection import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node.InitStorage import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run @@ -81,7 +83,7 @@ import Ouroboros.Consensus.Node.Serialisation import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ImmutableDB (simpleChunkInfo) import Ouroboros.Consensus.Storage.Serialisation -import Ouroboros.Consensus.Util (repeatedlyM, (..:), (.:)) +import Ouroboros.Consensus.Util (repeatedlyM) import Ouroboros.Consensus.Util.Condense import Ouroboros.Consensus.Util.Orphans () import Ouroboros.Network.Block (Serialised, unwrapCBORinCBOR, @@ -176,20 +178,37 @@ instance BasicEnvelopeValidation BlockA where instance ValidateEnvelope BlockA where -data instance LedgerState BlockA = LgrA { +data instance LedgerState BlockA mk = LgrA { lgrA_tip :: Point BlockA -- | The 'SlotNo' of the block containing the 'InitiateAtoB' transaction , lgrA_transition :: Maybe SlotNo } deriving (Show, Eq, Generic, Serialise) - deriving NoThunks via OnlyCheckWhnfNamed "LgrA" (LedgerState BlockA) + deriving NoThunks via OnlyCheckWhnfNamed "LgrA" (LedgerState BlockA mk) -- | Ticking has no state on the A ledger state -newtype instance Ticked (LedgerState BlockA) = TickedLedgerStateA { - getTickedLedgerStateA :: LedgerState BlockA +newtype instance Ticked1 (LedgerState BlockA) mk = TickedLedgerStateA { + getTickedLedgerStateA :: LedgerState BlockA mk } - deriving NoThunks via OnlyCheckWhnfNamed "TickedLgrA" (Ticked (LedgerState BlockA)) + deriving stock (Generic, Show, Eq) + deriving NoThunks via OnlyCheckWhnfNamed "TickedLgrA" (Ticked1 (LedgerState BlockA) mk) + +{------------------------------------------------------------------------------- + Ledger Tables +-------------------------------------------------------------------------------} + +type instance Key (LedgerState BlockA) = Void +type instance Value (LedgerState BlockA) = Void + +instance HasLedgerTables (LedgerState BlockA) +instance HasLedgerTables (Ticked1 (LedgerState BlockA)) +instance CanSerializeLedgerTables (LedgerState BlockA) +instance CanStowLedgerTables (LedgerState BlockA) +instance LedgerTablesAreTrivial (LedgerState BlockA) where + convertMapKind (LgrA x y) = LgrA x y +instance LedgerTablesAreTrivial (Ticked1 (LedgerState BlockA)) where + convertMapKind (TickedLedgerStateA x) = TickedLedgerStateA (convertMapKind x) data PartialLedgerConfigA = LCfgA { lcfgA_k :: SecurityParam @@ -204,7 +223,7 @@ type instance LedgerCfg (LedgerState BlockA) = instance GetTip (LedgerState BlockA) where getTip = castPoint . lgrA_tip -instance GetTip (Ticked (LedgerState BlockA)) where +instance GetTip (Ticked1 (LedgerState BlockA)) where getTip = castPoint . getTip . getTickedLedgerStateA instance IsLedger (LedgerState BlockA) where @@ -213,18 +232,29 @@ instance IsLedger (LedgerState BlockA) where type AuxLedgerEvent (LedgerState BlockA) = VoidLedgerEvent (LedgerState BlockA) - applyChainTickLedgerResult _ _ = pureLedgerResult . TickedLedgerStateA + applyChainTickLedgerResult _ _ = pureLedgerResult + . TickedLedgerStateA + . noNewTickingDiffs instance ApplyBlock (LedgerState BlockA) BlockA where applyBlockLedgerResult cfg blk = - fmap (pureLedgerResult . setTip) + fmap (pureLedgerResult . convertMapKind . setTip) . 
repeatedlyM - (fmap fst .: applyTx cfg DoNotIntervene (blockSlot blk)) + applyTx' (blkA_body blk) where - setTip :: TickedLedgerState BlockA -> LedgerState BlockA + setTip :: TickedLedgerState BlockA mk -> LedgerState BlockA mk setTip (TickedLedgerStateA st) = st { lgrA_tip = blockPoint blk } + applyTx' :: GenTx BlockA + -> TickedLedgerState BlockA ValuesMK + -> Except + (ApplyTxErr BlockA) + (TickedLedgerState BlockA ValuesMK) + applyTx' b = + fmap (TickedLedgerStateA . convertMapKind . getTickedLedgerStateA . fst) + . applyTx cfg DoNotIntervene (blockSlot blk) b + reapplyBlockLedgerResult = dontExpectError ..: applyBlockLedgerResult where @@ -233,6 +263,8 @@ instance ApplyBlock (LedgerState BlockA) BlockA where Left _ -> error "reapplyBlockLedgerResult: unexpected error" Right b -> b + getBlockKeySets _blk = trivialLedgerTables + instance UpdateLedger BlockA instance CommonProtocolParams BlockA where @@ -265,7 +297,7 @@ forgeBlockA :: TopLevelConfig BlockA -> BlockNo -> SlotNo - -> TickedLedgerState BlockA + -> TickedLedgerState BlockA mk -> [GenTx BlockA] -> IsLeader (BlockProtocol BlockA) -> BlockA @@ -324,10 +356,12 @@ instance LedgerSupportsMempool BlockA where InitiateAtoB -> do return (TickedLedgerStateA $ st { lgrA_transition = Just sno }, ValidatedGenTxA tx) - reapplyTx cfg slot = fmap fst .: (applyTx cfg DoNotIntervene slot . forgetValidatedGenTxA) + reapplyTx cfg slot tx st = applyDiffs st . fst <$> applyTx cfg DoNotIntervene slot (forgetValidatedGenTxA tx) st txForgetValidated = forgetValidatedGenTxA + getTransactionKeySets _tx = trivialLedgerTables + instance TxLimits BlockA where type TxMeasure BlockA = IgnoringOverflow ByteSize32 blockCapacityTxMeasure _cfg _st = IgnoringOverflow $ ByteSize32 $ 100 * 1024 -- arbitrary @@ -343,17 +377,19 @@ instance HasTxId (GenTx BlockA) where instance ConvertRawTxId (GenTx BlockA) where toRawTxIdHash = SBS.toShort . Lazy.toStrict . 
serialise -instance ShowQuery (BlockQuery BlockA) where +instance ShowQuery (BlockQuery BlockA fp) where showResult qry = case qry of {} -data instance BlockQuery BlockA result +data instance BlockQuery BlockA fp result deriving (Show) instance BlockSupportsLedgerQuery BlockA where - answerBlockQuery _ qry = case qry of {} + answerPureBlockQuery _ qry = case qry of {} + answerBlockQueryLookup _ qry = case qry of {} + answerBlockQueryTraverse _ qry = case qry of {} -instance SameDepIndex (BlockQuery BlockA) where - sameDepIndex qry _qry' = case qry of {} +instance SameDepIndex2 (BlockQuery BlockA) where + sameDepIndex2 qry _qry' = case qry of {} instance ConvertRawHash BlockA where toRawHash _ = id @@ -414,7 +450,7 @@ instance InspectLedger BlockA where where k = stabilityWindowA (lcfgA_k (snd (configLedger cfg))) -getConfirmationDepth :: LedgerState BlockA -> Maybe (SlotNo, Word64) +getConfirmationDepth :: LedgerState BlockA mk -> Maybe (SlotNo, Word64) getConfirmationDepth st = do confirmedInSlot <- lgrA_transition st return $ case ledgerTipSlot st of @@ -531,8 +567,8 @@ instance SerialiseNodeToNodeConstraints BlockA where deriving instance Serialise (AnnTip BlockA) -instance EncodeDisk BlockA (LedgerState BlockA) -instance DecodeDisk BlockA (LedgerState BlockA) +instance EncodeDisk BlockA (LedgerState BlockA EmptyMK) +instance DecodeDisk BlockA (LedgerState BlockA EmptyMK) instance EncodeDisk BlockA BlockA instance DecodeDisk BlockA (Lazy.ByteString -> BlockA) where @@ -581,10 +617,10 @@ instance SerialiseNodeToClient BlockA Void where encodeNodeToClient _ _ = absurd decodeNodeToClient _ _ = fail "no ApplyTxErr to be decoded" -instance SerialiseNodeToClient BlockA (SomeSecond BlockQuery BlockA) where - encodeNodeToClient _ _ = \case {} +instance SerialiseNodeToClient BlockA (SomeBlockQuery (BlockQuery BlockA)) where + encodeNodeToClient _ _ (SomeBlockQuery q) = case q of {} decodeNodeToClient _ _ = fail "there are no queries to be decoded" -instance SerialiseResult BlockA (BlockQuery BlockA) where - encodeResult _ _ = \case {} - decodeResult _ _ = \case {} +instance SerialiseResult' BlockA BlockQuery where + encodeResult' _ _ = \case {} + decodeResult' _ _ = \case {} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/B.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/B.hs index 7c45c64137..53495bf147 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/B.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/HardFork/Combinator/B.hs @@ -29,6 +29,7 @@ module Test.Consensus.HardFork.Combinator.B ( , GenTx (..) , Header (..) , LedgerState (..) + , LedgerTables (..) , NestedCtxt_ (..) , StorageConfig (..) , TxId (..) 
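With UTXO-HD the test ledger states gain a map-kind index ('mk') even though their tables are trivial ('Key' and 'Value' are both 'Void'). The 'LedgerTablesAreTrivial' instances above make 'convertMapKind' a free conversion between map kinds, which is what allows the same state to be used with 'ValuesMK' when applying transactions and with 'EmptyMK' for the disk codecs. A small illustrative sketch under those assumptions ('forgetTablesA' is a hypothetical name, not part of the patch):

-- For a trivial-tables ledger state, switching the map-kind index does not
-- touch any actual table contents.
forgetTablesA :: LedgerState BlockA ValuesMK -> LedgerState BlockA EmptyMK
forgetTablesA = convertMapKind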
@@ -60,6 +61,7 @@ import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsPeerSelection import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node.InitStorage import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run @@ -160,24 +162,41 @@ instance BasicEnvelopeValidation BlockB where instance ValidateEnvelope BlockB where -data instance LedgerState BlockB = LgrB { +data instance LedgerState BlockB mk = LgrB { lgrB_tip :: Point BlockB } deriving (Show, Eq, Generic, Serialise) - deriving NoThunks via OnlyCheckWhnfNamed "LgrB" (LedgerState BlockB) + deriving NoThunks via OnlyCheckWhnfNamed "LgrB" (LedgerState BlockB mk) + +{------------------------------------------------------------------------------- + Ledger Tables +-------------------------------------------------------------------------------} + + +type instance Key (LedgerState BlockB) = Void +type instance Value (LedgerState BlockB) = Void + +instance HasLedgerTables (LedgerState BlockB) +instance HasLedgerTables (Ticked1 (LedgerState BlockB)) +instance CanSerializeLedgerTables (LedgerState BlockB) +instance CanStowLedgerTables (LedgerState BlockB) +instance LedgerTablesAreTrivial (LedgerState BlockB) where + convertMapKind (LgrB x) = LgrB x +instance LedgerTablesAreTrivial (Ticked1 (LedgerState BlockB)) where + convertMapKind (TickedLedgerStateB x) = TickedLedgerStateB (convertMapKind x) type instance LedgerCfg (LedgerState BlockB) = () -- | Ticking has no state on the B ledger state -newtype instance Ticked (LedgerState BlockB) = TickedLedgerStateB { - getTickedLedgerStateB :: LedgerState BlockB +newtype instance Ticked1 (LedgerState BlockB) mk = TickedLedgerStateB { + getTickedLedgerStateB :: LedgerState BlockB mk } - deriving NoThunks via OnlyCheckWhnfNamed "TickedLgrB" (Ticked (LedgerState BlockB)) + deriving NoThunks via OnlyCheckWhnfNamed "TickedLgrB" (Ticked1 (LedgerState BlockB) mk) instance GetTip (LedgerState BlockB) where getTip = castPoint . lgrB_tip -instance GetTip (Ticked (LedgerState BlockB)) where +instance GetTip (Ticked1 (LedgerState BlockB)) where getTip = castPoint . getTip . getTickedLedgerStateB instance IsLedger (LedgerState BlockB) where @@ -186,12 +205,16 @@ instance IsLedger (LedgerState BlockB) where type AuxLedgerEvent (LedgerState BlockB) = VoidLedgerEvent (LedgerState BlockB) - applyChainTickLedgerResult _ _ = pureLedgerResult . TickedLedgerStateB + applyChainTickLedgerResult _ _ = pureLedgerResult + . TickedLedgerStateB + . 
noNewTickingDiffs instance ApplyBlock (LedgerState BlockB) BlockB where applyBlockLedgerResult = \_ b _ -> return $ pureLedgerResult $ LgrB (blockPoint b) reapplyBlockLedgerResult = \_ b _ -> pureLedgerResult $ LgrB (blockPoint b) + getBlockKeySets _blk = trivialLedgerTables + instance UpdateLedger BlockB instance CommonProtocolParams BlockB where @@ -217,7 +240,7 @@ forgeBlockB :: TopLevelConfig BlockB -> BlockNo -> SlotNo - -> TickedLedgerState BlockB + -> TickedLedgerState BlockB mk -> [GenTx BlockB] -> IsLeader (BlockProtocol BlockB) -> BlockB @@ -264,6 +287,8 @@ instance LedgerSupportsMempool BlockB where txForgetValidated = \case {} + getTransactionKeySets _tx = trivialLedgerTables + instance TxLimits BlockB where type TxMeasure BlockB = IgnoringOverflow ByteSize32 blockCapacityTxMeasure _cfg _st = IgnoringOverflow $ ByteSize32 $ 100 * 1024 -- arbitrary @@ -279,17 +304,19 @@ instance HasTxId (GenTx BlockB) where instance ConvertRawTxId (GenTx BlockB) where toRawTxIdHash = \case {} -instance ShowQuery (BlockQuery BlockB) where +instance ShowQuery (BlockQuery BlockB fp) where showResult qry = case qry of {} -data instance BlockQuery BlockB result +data instance BlockQuery BlockB fp result deriving (Show) instance BlockSupportsLedgerQuery BlockB where - answerBlockQuery _ qry = case qry of {} + answerPureBlockQuery _ qry = case qry of {} + answerBlockQueryLookup _ qry = case qry of {} + answerBlockQueryTraverse _ qry = case qry of {} -instance SameDepIndex (BlockQuery BlockB) where - sameDepIndex qry _qry' = case qry of {} +instance SameDepIndex2 (BlockQuery BlockB) where + sameDepIndex2 qry _qry' = case qry of {} instance ConvertRawHash BlockB where toRawHash _ = id @@ -388,8 +415,8 @@ instance SerialiseNodeToNodeConstraints BlockB where deriving instance Serialise (AnnTip BlockB) -instance EncodeDisk BlockB (LedgerState BlockB) -instance DecodeDisk BlockB (LedgerState BlockB) +instance EncodeDisk BlockB (LedgerState BlockB EmptyMK) +instance DecodeDisk BlockB (LedgerState BlockB EmptyMK) instance EncodeDisk BlockB BlockB instance DecodeDisk BlockB (Lazy.ByteString -> BlockB) where @@ -438,10 +465,10 @@ instance SerialiseNodeToClient BlockB Void where encodeNodeToClient _ _ = absurd decodeNodeToClient _ _ = fail "no ApplyTxErr to be decoded" -instance SerialiseNodeToClient BlockB (SomeSecond BlockQuery BlockB) where - encodeNodeToClient _ _ = \case {} +instance SerialiseNodeToClient BlockB (SomeBlockQuery (BlockQuery BlockB)) where + encodeNodeToClient _ _ (SomeBlockQuery q) = case q of {} decodeNodeToClient _ _ = fail "there are no queries to be decoded" -instance SerialiseResult BlockB (BlockQuery BlockB) where - encodeResult _ _ = \case {} - decodeResult _ _ = \case {} +instance SerialiseResult' BlockB BlockQuery where + encodeResult' _ _ = \case {} + decodeResult' _ _ = \case {} diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/IOSimQSM/Test/StateMachine/Sequential.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/IOSimQSM/Test/StateMachine/Sequential.hs index e6eb0e55c4..29a17c652a 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/IOSimQSM/Test/StateMachine/Sequential.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/IOSimQSM/Test/StateMachine/Sequential.hs @@ -36,7 +36,7 @@ import Control.Concurrent.Class.MonadSTM.TChan (TChan, newTChanIO, import Control.Exception (SomeAsyncException (..), SomeException, displayException, fromException) import Control.Monad (when) -import 
Control.Monad.Class.MonadSay (MonadSay, say) +import Control.Monad.Class.MonadSay (say) import Control.Monad.State.Strict (StateT, get, lift, put, runStateT) import Data.Dynamic (Dynamic, toDyn) import Data.Either (fromRight) @@ -54,7 +54,7 @@ import Text.Show.Pretty (ppShow) runCommands' :: (Show (cmd Concrete), Show (resp Concrete)) => (Rank2.Traversable cmd, Rank2.Foldable resp) - => (IOLike m, MonadSay m) + => IOLike m => m (StateMachine model cmd m resp) -> Commands cmd resp -> m (History cmd resp, model Concrete, Reason) @@ -90,7 +90,7 @@ data Check executeCommands :: (Show (cmd Concrete), Show (resp Concrete)) => (Rank2.Traversable cmd, Rank2.Foldable resp) - => (MonadSay m, IOLike m) + => IOLike m => StateMachine model cmd m resp -> TChan m (Pid, HistoryEvent cmd resp) -> Pid diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs index 60f7476286..4276db5347 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/NodeLifecycle.hs @@ -13,6 +13,7 @@ module Test.Consensus.PeerSimulator.NodeLifecycle ( , restoreNode ) where +import Control.Monad.Base import Control.ResourceRegistry import Control.Tracer (Tracer (..), traceWith) import Data.Functor (void) @@ -114,7 +115,7 @@ data NodeLifecycle blk m = NodeLifecycle { -- | Create a ChainDB and start a BlockRunner that operate on the peers' -- candidate fragments. mkChainDb :: - IOLike m => + (IOLike m, MonadBase m m) => LiveResources TestBlock m -> m (ChainDB m TestBlock, m (WithOrigin SlotNo)) mkChainDb resources = do @@ -150,7 +151,7 @@ mkChainDb resources = do -- | Allocate all the resources that depend on the results of previous live -- intervals, the ChainDB and its persisted state. restoreNode :: - IOLike m => + (IOLike m, MonadBase m m) => LiveResources TestBlock m -> LiveIntervalResult TestBlock -> m (LiveNode TestBlock m) @@ -170,7 +171,7 @@ restoreNode resources LiveIntervalResult {lirPeerResults, lirActive} = do -- starts the node's threads. lifecycleStart :: forall m. - IOLike m => + (IOLike m, MonadBase m m) => (LiveInterval TestBlock m -> m ()) -> LiveResources TestBlock m -> LiveIntervalResult TestBlock -> diff --git a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs index 1010c7eda3..6ebe86194a 100644 --- a/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs +++ b/ouroboros-consensus-diffusion/test/consensus-test/Test/Consensus/PeerSimulator/Run.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -11,6 +12,7 @@ module Test.Consensus.PeerSimulator.Run ( ) where import Control.Monad (foldM, forM, void) +import Control.Monad.Base import Control.Monad.Class.MonadTime (MonadTime) import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.ResourceRegistry @@ -327,7 +329,13 @@ mkStateTracer schedulerConfig GenesisTest {gtBlockTree} PeerSimulatorResources { -- provided by 'LiveIntervalResult'. startNode :: forall m. 
- (IOLike m, MonadTime m, MonadTimer m) => + ( IOLike m + , MonadTime m + , MonadTimer m +#if __GLASGOW_HASKELL__ >= 900 + , MonadBase m m +#endif + ) => SchedulerConfig -> GenesisTestFull TestBlock -> LiveInterval TestBlock m -> @@ -433,7 +441,7 @@ startNode schedulerConfig genesisTest interval = do -- | Set up all resources related to node start/shutdown. nodeLifecycle :: - (IOLike m, MonadTime m, MonadTimer m) => + (IOLike m, MonadTime m, MonadTimer m, MonadBase m m) => SchedulerConfig -> GenesisTestFull TestBlock -> Tracer m (TraceEvent TestBlock) -> @@ -472,7 +480,7 @@ nodeLifecycle schedulerConfig genesisTest lrTracer lrRegistry lrPeerSim = do -- send all ticks in a 'PointSchedule' to all given peers in turn. runPointSchedule :: forall m. - (IOLike m, MonadTime m, MonadTimer m) => + (IOLike m, MonadTime m, MonadTimer m, MonadBase m m) => SchedulerConfig -> GenesisTestFull TestBlock -> Tracer m (TraceEvent TestBlock) -> diff --git a/ouroboros-consensus-diffusion/test/mock-test/Main.hs b/ouroboros-consensus-diffusion/test/mock-test/Main.hs index 0712ecb462..08a2a0d21e 100644 --- a/ouroboros-consensus-diffusion/test/mock-test/Main.hs +++ b/ouroboros-consensus-diffusion/test/mock-test/Main.hs @@ -1,6 +1,7 @@ module Main (main) where import qualified Test.Consensus.Ledger.Mock (tests) +import qualified Test.Consensus.Ledger.Mock.LedgerTables (tests) import Test.Tasty import qualified Test.ThreadNet.BFT (tests) import qualified Test.ThreadNet.LeaderSchedule (tests) @@ -16,15 +17,9 @@ tests :: TestTree tests = testGroup "ouroboros-consensus" [ Test.Consensus.Ledger.Mock.tests + , Test.Consensus.Ledger.Mock.LedgerTables.tests , Test.ThreadNet.BFT.tests , Test.ThreadNet.LeaderSchedule.tests , Test.ThreadNet.PBFT.tests , Test.ThreadNet.Praos.tests ] - --- Counter to address the zfs copy bug on Hydra --- ``` --- cannot execute binary file: Exec format error --- ``` --- --- 3 diff --git a/ouroboros-consensus-diffusion/test/mock-test/Test/Consensus/Ledger/Mock/LedgerTables.hs b/ouroboros-consensus-diffusion/test/mock-test/Test/Consensus/Ledger/Mock/LedgerTables.hs new file mode 100644 index 0000000000..a3f67369d6 --- /dev/null +++ b/ouroboros-consensus-diffusion/test/mock-test/Test/Consensus/Ledger/Mock/LedgerTables.hs @@ -0,0 +1,24 @@ +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE TypeApplications #-} +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Consensus.Ledger.Mock.LedgerTables (tests) where + +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Mock.Ledger +import Ouroboros.Consensus.Protocol.PBFT +import Test.Consensus.Ledger.Mock.Generators () +import Test.LedgerTables +import Test.Tasty +import Test.Tasty.QuickCheck + +type Block = SimpleBlock SimpleMockCrypto (SimplePBftExt SimpleMockCrypto PBftMockCrypto) + +tests :: TestTree +tests = testGroup "LedgerTables" + [ testProperty "Stowable laws" (prop_stowable_laws @Block) + , testProperty "HasLedgerTables laws" (prop_hasledgertables_laws @Block) + ] + +instance Arbitrary (LedgerTables (LedgerState Block) ValuesMK) where + arbitrary = projectLedgerTables <$> arbitrary diff --git a/ouroboros-consensus/bench/ChainSync-client-bench/Main.hs b/ouroboros-consensus/bench/ChainSync-client-bench/Main.hs index 62facd2089..faa49601fb 100644 --- a/ouroboros-consensus/bench/ChainSync-client-bench/Main.hs +++ b/ouroboros-consensus/bench/ChainSync-client-bench/Main.hs @@ -24,6 +24,7 @@ import Ouroboros.Consensus.Config import qualified Ouroboros.Consensus.HardFork.History as HardFork import qualified 
Ouroboros.Consensus.HeaderStateHistory as HeaderStateHistory import qualified Ouroboros.Consensus.HeaderValidation as HV +import Ouroboros.Consensus.Ledger.Basics import qualified Ouroboros.Consensus.Ledger.Extended as Extended import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client as CSClient import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.HistoricityCheck as HistoricityCheck @@ -113,7 +114,7 @@ oneBenchRun pure $ HeaderStateHistory.fromChain topConfig - (oracularLedgerDB GenesisPoint) + (convertMapKind $ oracularLedgerDB GenesisPoint) Chain.Genesis , CSClient.getIsInvalidBlock = pure invalidBlock , CSClient.getPastLedger = pure . Just . oracularLedgerDB @@ -184,7 +185,7 @@ inTheYearOneBillion = SystemTime { * 1e9 } -oracularLedgerDB :: Point B -> Extended.ExtLedgerState B +oracularLedgerDB :: Point B -> Extended.ExtLedgerState B EmptyMK oracularLedgerDB p = Extended.ExtLedgerState { Extended.headerState = HV.HeaderState { @@ -200,7 +201,7 @@ oracularLedgerDB p = } , Extended.ledgerState = TB.TestLedger { TB.lastAppliedPoint = p - , TB.payloadDependentState = () + , TB.payloadDependentState = TB.EmptyPLDS } } diff --git a/ouroboros-consensus/bench/backingstore-bench/Bench/Commands.hs b/ouroboros-consensus/bench/backingstore-bench/Bench/Commands.hs new file mode 100644 index 0000000000..63ce126ee7 --- /dev/null +++ b/ouroboros-consensus/bench/backingstore-bench/Bench/Commands.hs @@ -0,0 +1,220 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Bench.Commands ( + -- * Command types + Cmd (..) + , VHID + -- * Aux types + , BackingStoreInitialiser + -- * Running commands in a concrete monad + , run + ) where + +import Cardano.Slotting.Slot (SlotNo, WithOrigin) +import Control.DeepSeq +import Control.Monad (void) +import Control.Monad.Class.MonadThrow (MonadThrow) +import Control.Monad.Reader (MonadReader (ask), MonadTrans (..), + ReaderT (..)) +import Control.Monad.State.Strict (MonadState, StateT, evalStateT, + gets, modify) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Maybe (fromJust) +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore + (BackingStore, BackingStoreValueHandle, InitFrom (..), + RangeQuery) +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import System.FS.API (SomeHasFS) +import System.FS.API.Types (FsPath) + +{------------------------------------------------------------------------------- + Command types +-------------------------------------------------------------------------------} + +data Cmd ks vs d = + BSInitFromValues !(WithOrigin SlotNo) !vs + | BSInitFromCopy !FsPath + | BSClose + | BSCopy !FsPath + | BSValueHandle !VHID + | BSWrite !SlotNo !d + | BSVHClose !VHID + | BSVHRangeRead !VHID !(RangeQuery ks) + | BSVHRead !VHID !ks + | BSRead !ks + deriving Show + +-- | Identifiers for value handles +type VHID = Int + +instance NFData (Cmd ks vs d) where rnf = rwhnf + +{------------------------------------------------------------------------------- + Aux types +-------------------------------------------------------------------------------} + +type BackingStoreInitialiser m ks vs d = + SomeHasFS m + -> InitFrom vs + -> m (BackingStore m ks vs d) + +{------------------------------------------------------------------------------- + Running 
commands in a concrete monad +-------------------------------------------------------------------------------} + +run :: + forall m ks vs d. MonadThrow m + => SomeHasFS m + -> BackingStoreInitialiser m ks vs d + -> [Cmd ks vs d] -> m () +run shfs bsi cmds = evalStateT (runReaderT (runM m) initialEnv) initialState + where + m :: M ks vs d m () + m = runCmds cmds + + initialEnv = Env { + envSomeHasFS = shfs + , envBackingStoreInitialiser = bsi + } + + initialState = St { + stLookUp = mempty + , stBackingStore = Nothing + } + +-- | Concrete monad 'M' to run commands in. +-- +-- 'M' is a newtype because 'runCmds' and 'runCmd' require a single transformer +-- in its type: @t m ()@. Compare this with @'ReaderT' r ('StateT' s m) a@, +-- which has two transfomers on top of @m@, while @M@ itself is just a single +-- transformer. +newtype M ks vs d m a = M { + runM :: ReaderT (Env m ks vs d) (StateT (St m ks vs d) m) a + } + deriving newtype (Functor, Applicative, Monad) + deriving newtype (MonadReader (Env m ks vs d), MonadState (St m ks vs d)) + +instance MonadTrans (M ks vs d) where + lift :: Monad m => m a -> M ks vs d m a + lift = M . lift . lift + +{------------------------------------------------------------------------------- + Running commands +-------------------------------------------------------------------------------} + +-- | State to keep track of while running commands. +data St m ks vs d = St { + -- | Backing stores have no built-in notion of value handle management, so + -- we have to keep track of them somewhere. Running a command that + -- references a value handle by their 'VHID' should use this mapping to look + -- up the corresponding value handle. + stLookUp :: !(Map VHID (BackingStoreValueHandle m ks vs)) + -- | The backing store that is currently in use. + -- + -- This is a 'Maybe', because when starting to run a list of commands, there + -- is initially no backing store. After an initialisation command like + -- 'BSInitFromValues' and 'BSInitFromCopy', this field should never be + -- 'Nothing'. + , stBackingStore :: !(Maybe (BackingStore m ks vs d)) + } + +-- | Reader environment to pass around while running commands. +data Env m ks vs d = Env { + -- | Access to the file system (simulated or real) is required for + -- initialising backing store, and making copies of a backing store. + envSomeHasFS :: !(SomeHasFS m) + -- | A way to initialise a new backing store. A new backing store can be + -- initialised even when one already exists. + , envBackingStoreInitialiser :: !(BackingStoreInitialiser m ks vs d) + } + +runCmds :: + forall m t ks vs d. 
( + MonadReader (Env m ks vs d) (t m) + , MonadState (St m ks vs d) (t m) + , MonadTrans t + , MonadThrow m + ) + => [Cmd ks vs d] + -> t m () +runCmds = mapM_ runCmd + +runCmd :: + ( MonadReader (Env m ks vs d) (t m) + , MonadState (St m ks vs d) (t m) + , MonadTrans t + , MonadThrow m + ) + => Cmd ks vs d + -> t m () +runCmd = \case + BSInitFromValues sl vs -> bsInitFromValues sl vs + BSInitFromCopy bsp -> bsInitFromCopy bsp + BSClose -> bsClose + BSCopy bsp -> bsCopy bsp + BSValueHandle i -> bsValueHandle i + BSWrite sl d -> bsWrite sl d + BSVHClose i -> bsvhClose i + BSVHRangeRead i rq -> bsvhRangeRead i rq + BSVHRead i ks -> bsvhRead i ks + BSRead ks -> bsRead ks + where + bsInitFromValues sl vs = do + Env shfs bsi <- ask + bs' <- lift $ bsi shfs (InitFromValues sl vs) + modify (\st -> st { + stBackingStore = Just bs' + }) + + bsInitFromCopy bsp = do + Env shfs bsi <- ask + bs' <- lift $ bsi shfs (InitFromCopy bsp) + modify (\st -> st { + stBackingStore = Just bs' + }) + + bsClose = do + bs <- fromJust <$> gets stBackingStore + lift $ BS.bsClose bs + + bsCopy bsp = do + bs <- fromJust <$> gets stBackingStore + lift $ BS.bsCopy bs bsp + + bsValueHandle i = do + bs <- fromJust <$> gets stBackingStore + vh <- lift $ BS.bsValueHandle bs + let f vhMay = case vhMay of + Nothing -> Just vh + Just _ -> error "bsValueHandle" + modify (\st -> st { + stLookUp = Map.alter f i $ stLookUp st + }) + + bsWrite sl d = do + bs <- fromJust <$> gets stBackingStore + lift $ BS.bsWrite bs sl d + + bsvhClose i = do + vh <- gets (fromJust . Map.lookup i . stLookUp) + lift $ BS.bsvhClose vh + + bsvhRangeRead i rq = do + vh <- gets (fromJust . Map.lookup i . stLookUp) + void $ lift $ BS.bsvhRangeRead vh rq + + bsvhRead i ks = do + vh <- gets (fromJust . Map.lookup i . stLookUp) + void $ lift $ BS.bsvhRead vh ks + + bsRead ks = do + bs <- fromJust <$> gets stBackingStore + void $ lift $ BS.bsRead bs ks diff --git a/ouroboros-consensus/bench/backingstore-bench/Main.hs b/ouroboros-consensus/bench/backingstore-bench/Main.hs new file mode 100644 index 0000000000..a00088a742 --- /dev/null +++ b/ouroboros-consensus/bench/backingstore-bench/Main.hs @@ -0,0 +1,247 @@ +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE TupleSections #-} + +module Main (main) where + +import Bench.Commands (BackingStoreInitialiser, Cmd (..), run) +import Cardano.Slotting.Slot (SlotNo, WithOrigin (..)) +import Control.DeepSeq (NFData (..), rwhnf) +import Control.Monad.Class.MonadThrow (MonadThrow) +import Data.Set (Set) +import qualified Data.Set as Set +import Data.SOP.Dict (Dict (..)) +import Data.Word (Word64) +import Ouroboros.Consensus.Ledger.Tables (DiffMK (..), KeysMK (..), + LedgerTables (..), ValuesMK) +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Ledger.Tables.Utils (emptyLedgerTables) +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB + (LMDBLimits (..)) +import Ouroboros.Consensus.Util.Args (Complete) +import qualified System.Directory as Dir +import System.FS.API (HasFS (..), SomeHasFS (..)) +import System.FS.API.Types (MountPoint (..), mkFsPath) +import System.FS.IO (ioHasFS) +import System.IO.Temp (createTempDirectory, + getCanonicalTemporaryDirectory) +import qualified Test.QuickCheck.Monadic as QC.Monadic (run) +import Test.QuickCheck.Monadic (monadicIO) +import Test.Tasty (TestTree, testGroup, withResource) +import 
Test.Tasty.Bench (Benchmark, bench, bgroup, defaultMain, + envWithCleanup, nfAppIO) +import Test.Tasty.QuickCheck (testProperty) +import Test.Util.LedgerStateOnlyTables (OTLedgerTables) + +{------------------------------------------------------------------------------- + Main benchmarks +-------------------------------------------------------------------------------} + +main :: IO () +main = defaultMain [bgroup "Bench" [ + tests + , benchmarks + ]] + +benchmarks :: Benchmark +benchmarks = bgroup "BackingStore" [ + benchCmds "oneWritePer100Reads InMem 10_000" bssInMem $ + oneWritePer100Reads 10_000 + , benchCmds "oneWritePer100Reads LMDB 10_000" bssLMDB $ + oneWritePer100Reads 10_000 + ] + +benchCmds :: String -> Complete BackingStoreArgs IO -> [Cmd K V D] -> Benchmark +benchCmds name bss cmds0 = + envWithCleanup ((,cmds0) <$> setup bss) (eCleanup . fst) $ + \ ~(e, cmds) -> bench name $ nfAppIO (runner e) cmds + +runner :: MonadThrow m => Env m ks vs d -> [Cmd ks vs d] -> m () +runner e cmds = do + shfs <- eMakeNewSomeHasFS e + run shfs (eBackingStoreInitialiser e) cmds + +{------------------------------------------------------------------------------- + Auxiliary tests +-------------------------------------------------------------------------------} + +tests :: TestTree +tests = testGroup "Auxiliary tests" [ + withResource (setup bssInMem) eCleanup $ \eIO -> bgroup "InMem" [ + testProperty "simpleCopy InMem" $ monadicIO $ do + e <- QC.Monadic.run eIO + QC.Monadic.run $ runner e simpleCopy + ] + , withResource (setup bssLMDB) eCleanup $ \eIO -> bgroup "LMDB" [ + testProperty "simpleCopy LMDB" $ monadicIO $ do + e <- QC.Monadic.run eIO + QC.Monadic.run $ runner e simpleCopy + ] + ] + +{------------------------------------------------------------------------------- + Backing store selectors +-------------------------------------------------------------------------------} + +bssInMem :: Complete BackingStoreArgs IO +bssInMem = InMemoryBackingStoreArgs + +bssLMDB :: Complete BackingStoreArgs IO +bssLMDB = LMDBBackingStoreArgs benchLMDBLimits Dict + +benchLMDBLimits :: LMDBLimits +benchLMDBLimits = LMDBLimits + { lmdbMapSize = 100 * 1_024 * 1_024 + , lmdbMaxDatabases = 3 + , lmdbMaxReaders = 32 + } + +{------------------------------------------------------------------------------- + Benchmark scenarios +-------------------------------------------------------------------------------} + +-- Concrete types of keys, values and diffs that we use in the benchmarks. +type K = OTLedgerTables Word64 Word64 KeysMK +type V = OTLedgerTables Word64 Word64 ValuesMK +type D = OTLedgerTables Word64 Word64 DiffMK + +-- | Perform one write per 100 reads. +-- +-- This mimicks the flushing behaviour of the LedgerDB: each applied block +-- incurs a read, and we aggregate diffs for 100 blocks before we flush/write +-- them. +-- +-- @ +-- oneWritePer100Reads 10_000 +-- == +-- [ BSInitFromValues Origin [] +-- , BSWrite 99 [Insert 0 at key 0, ..., Insert 99 at key 99] +-- , BSRead 0 +-- ... +-- , BSRead 99 +-- , BSWrite 199 [Insert 100 at key 100, ..., Insert 199 at key 199] +-- , BSRead 100 +-- ... +-- , BSRead 199 +-- ... +-- , BSClose +-- ] +-- @ +oneWritePer100Reads :: Int -> [Cmd K V D] +oneWritePer100Reads n = concat [ + [ini] + , workload + , [close] + ] + where + ini = BSInitFromValues Origin emptyLedgerTables + close = BSClose + + workload = flip concatMap dat $ \block -> mkWrite block : mkReads block + + -- A write aggregates, for a block, the additions to the ledger state. 
The + -- slot number that is used for the write corresponds to the youngest block + -- (i.e., highest slot number), which is by construction the last entry in + -- the block. + mkWrite :: [(SlotNo, Word64)] -> Cmd K V D + mkWrite block = BSWrite (fst $ last block) $ + mkDiffs $ Diff.fromListInserts [(x,x) | (_sl, x) <- block] + + -- Each value is read once. + mkReads :: [(SlotNo, Word64)] -> [Cmd K V D] + mkReads block = [BSRead (mkKey x) | (_sl, x) <- block] + + -- A list of blocks. Each block maps slot numbers to a value. This mapping + -- indicates that this values is added to the ledger tables at the given + -- slot number. + dat :: [[(SlotNo, Word64)]] + dat = groupsOfN 100 $ zip [0..] [0 .. fromIntegral n - 1] + +simpleCopy :: [Cmd K V D] +simpleCopy = [ + BSInitFromValues Origin emptyLedgerTables + , BSCopy (mkFsPath ["copies", "somecopy"]) + , BSClose + ] + +{------------------------------------------------------------------------------- + Benchmark scenarios: helpers +-------------------------------------------------------------------------------} + +mkKey :: k -> OTLedgerTables k v KeysMK +mkKey = mkKeys . Set.singleton + +mkKeys :: Set k -> OTLedgerTables k v KeysMK +mkKeys = LedgerTables . KeysMK + +mkDiffs :: Diff.Diff k v -> OTLedgerTables k v DiffMK +mkDiffs = LedgerTables . DiffMK + +groupsOfN :: Int -> [a] -> [[a]] +groupsOfN n + | n <= 0 = error "groupsOfN: n should be positive" + | otherwise = go + where + go :: [a] -> [[a]] + go [] = [] + go xs = take n xs : groupsOfN n (drop n xs) + +{------------------------------------------------------------------------------- + Set up benchmark environment +-------------------------------------------------------------------------------} + +-- | The environment to set up when running benchmarks. +-- +-- Benchmarked code is run multiple times within the same environment. However, +-- we don't want (on-disk) state to carry over from one run to the other. For +-- this reason, each benchmark run should intialise a new backing store, and +-- each benchmark run should have a clean directory to do filesystem operations +-- in. 'eBackingStoreInitialiser' provides the former, while 'eMakeNewSomeHasFS' +-- provides the latter. +data Env m ks vs d = Env { + -- | A method for initialising a backing store. + eBackingStoreInitialiser :: !(BackingStoreInitialiser m ks vs d) + -- | Creates a fresh directory, and provides an API to interact with it. + -- Note: we may want to provide a second value of this type to benchmark + -- with a different directory for snapshot storage. + , eMakeNewSomeHasFS :: !(m (SomeHasFS m)) + -- | How to clean up the 'Env'. + , eCleanup :: !(m ()) + } + +instance NFData (Env m ks vs d) where rnf = rwhnf + +-- | Sets up a root temporary directory, and creates an 'Env' for it. +-- +-- 'eMakeNewSomeHasFS' creates a new temporary directory under the temporary +-- root, such that each benchmark run has a fresh directory to work in. +-- 'eCleanup' will recursively remove the root temporary directory, erasing all +-- directories created by invocations of 'eMakeNewSomeHasFS'. +setup :: Complete BackingStoreArgs IO -> IO (Env IO K V D) +setup bss = do + sysTmpDir <- getCanonicalTemporaryDirectory + benchTmpDir <- createTempDirectory sysTmpDir "bench_backingstore" + -- Note that we are initialising the Backing Store with the same directory + -- for storing tables and snapshots. We may want to expand on this later. 
+ let bsi = \hasFS i -> + BS.newBackingStoreInitialiser + mempty + bss + hasFS + hasFS + i + + let mkSomeHasFS = do + tmpDir <- createTempDirectory benchTmpDir "run" + let hfs = ioHasFS (MountPoint tmpDir) + + createDirectory hfs (mkFsPath ["copies"]) + + pure $ SomeHasFS hfs + + pure $ Env { + eBackingStoreInitialiser = bsi + , eMakeNewSomeHasFS = mkSomeHasFS + , eCleanup = Dir.removeDirectoryRecursive benchTmpDir + } diff --git a/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool.hs b/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool.hs index 5afda0c5bd..7563a31887 100644 --- a/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool.hs +++ b/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool.hs @@ -43,6 +43,7 @@ data MempoolCmd blk = AddTx (Ledger.GenTx blk) deriving (Generic) +deriving stock instance Show (Ledger.GenTx blk) => Show (MempoolCmd blk) deriving anyclass instance (NFData (Ledger.GenTx blk)) => NFData (MempoolCmd blk) getCmdTx :: MempoolCmd blk -> Maybe (Ledger.GenTx blk) diff --git a/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool/TestBlock.hs b/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool/TestBlock.hs index 2d865028bb..8051d12985 100644 --- a/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool/TestBlock.hs +++ b/ouroboros-consensus/bench/mempool-bench/Bench/Consensus/Mempool/TestBlock.hs @@ -4,6 +4,7 @@ {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TupleSections #-} {-# LANGUAGE TypeFamilies #-} @@ -22,11 +23,13 @@ module Bench.Consensus.Mempool.TestBlock ( , txSize ) where +import Cardano.Binary (FromCBOR (..), ToCBOR (..)) import qualified Cardano.Slotting.Time as Time -import Codec.Serialise (Serialise) +import Codec.Serialise (Serialise (..)) import Control.DeepSeq (NFData) import Control.Monad.Trans.Except (except) -import Data.Set (Set, (\\)) +import qualified Data.Map.Strict as Map +import Data.Set (Set) import qualified Data.Set as Set import Data.TreeDiff (ToExpr) import GHC.Generics (Generic) @@ -36,11 +39,10 @@ import Ouroboros.Consensus.Config.SecurityParam as Consensus import qualified Ouroboros.Consensus.HardFork.History as HardFork import qualified Ouroboros.Consensus.Ledger.Basics as Ledger import qualified Ouroboros.Consensus.Ledger.SupportsMempool as Ledger -import Test.Util.TestBlock (LedgerState (TestLedger), - PayloadSemantics (PayloadDependentError, PayloadDependentState, applyPayload), - TestBlockWith, applyDirectlyToPayloadDependentState, - lastAppliedPoint, payloadDependentState, - testBlockLedgerConfigFrom) +import Ouroboros.Consensus.Ledger.Tables +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import qualified Ouroboros.Consensus.Ledger.Tables.Utils as Ledger +import Test.Util.TestBlock hiding (TestBlock) {------------------------------------------------------------------------------- MempoolTestBlock @@ -57,17 +59,28 @@ data Tx = Tx { newtype Token = Token { unToken :: Int } deriving stock (Show, Eq, Ord, Generic) + deriving newtype (ToCBOR, FromCBOR, Num, Enum) deriving anyclass (NoThunks, ToExpr, Serialise, NFData) +mkTx :: + [Token] + -- ^ Consumed + -> [Token] + -- ^ Produced + -> Ledger.GenTx TestBlock +mkTx cons prod = TestBlockGenTx $ Tx { consumed = Set.fromList cons + , produced = Set.fromList prod + } + {------------------------------------------------------------------------------- Initial parameters 
-------------------------------------------------------------------------------} -initialLedgerState :: LedgerState (TestBlockWith Tx) +initialLedgerState :: LedgerState (TestBlockWith Tx) ValuesMK initialLedgerState = TestLedger { lastAppliedPoint = Block.GenesisPoint - , payloadDependentState = TestLedgerState { - availableTokens = Set.empty :: Set Token + , payloadDependentState = TestPLDS { + getTestPLDS = ValuesMK Map.empty } } @@ -92,16 +105,44 @@ data TxApplicationError = deriving anyclass (NoThunks, ToExpr, Serialise) instance PayloadSemantics Tx where - type PayloadDependentState Tx = TestLedgerState + newtype instance PayloadDependentState Tx mk = TestPLDS { + getTestPLDS :: mk Token () + } + deriving stock Generic type PayloadDependentError Tx = TxApplicationError - applyPayload st@TestLedgerState { availableTokens } Tx { consumed, produced } = - let - notFound = Set.filter (not . (`Set.member` availableTokens)) consumed - in if Set.null notFound - then Right $ st{ availableTokens = availableTokens \\ consumed <> produced } - else Left $ TxApplicationError notFound + applyPayload plds tx = + let + notFound = Set.filter (not . (`Map.member` tokMap)) consumed + in if Set.null notFound + then Right $ TestPLDS (Ledger.rawAttachAndApplyDiffs fullDiff toks) + else Left $ TxApplicationError notFound + where + TestPLDS toks@(ValuesMK tokMap) = plds + Tx {consumed, produced} = tx + + consumedDiff, producedDiff :: Diff.Diff Token () + consumedDiff = Diff.fromListDeletes [(t, ()) | t <- Set.toList consumed] + producedDiff = Diff.fromListInserts [(t, ()) | t <- Set.toList produced] + + fullDiff :: DiffMK Token () + fullDiff = DiffMK $ consumedDiff <> producedDiff + + getPayloadKeySets tx = LedgerTables $ KeysMK $ consumed <> produced + where + Tx {consumed, produced} = tx + +deriving stock instance EqMK mk + => Eq (PayloadDependentState Tx mk) +deriving stock instance ShowMK mk + => Show (PayloadDependentState Tx mk) +deriving anyclass instance NoThunksMK mk + => NoThunks (PayloadDependentState Tx mk) + +instance Serialise (PayloadDependentState Tx EmptyMK) where + encode = error "unused: encode" + decode = error "unused: decode" -- | TODO: for the time being 'TestBlock' does not have any codec config data instance Block.CodecConfig TestBlock = TestBlockCodecConfig @@ -111,6 +152,36 @@ data instance Block.CodecConfig TestBlock = TestBlockCodecConfig data instance Block.StorageConfig TestBlock = TestBlockStorageConfig deriving (Show, Generic, NoThunks) +{------------------------------------------------------------------------------- + Ledger tables +-------------------------------------------------------------------------------} + +type instance Key (LedgerState TestBlock) = Token +type instance Value (LedgerState TestBlock) = () + +instance HasLedgerTables (LedgerState TestBlock) where + projectLedgerTables st = + LedgerTables $ getTestPLDS $ payloadDependentState st + withLedgerTables st table = st { + payloadDependentState = plds { + getTestPLDS = Ledger.getLedgerTables table + } + } + where + TestLedger { payloadDependentState = plds } = st + +instance HasLedgerTables (Ticked1 (LedgerState TestBlock)) where + projectLedgerTables (TickedTestLedger st) = Ledger.castLedgerTables $ + Ledger.projectLedgerTables st + withLedgerTables (TickedTestLedger st) tables = + TickedTestLedger $ Ledger.withLedgerTables st $ Ledger.castLedgerTables tables + +instance CanSerializeLedgerTables (LedgerState TestBlock) + +instance CanStowLedgerTables (LedgerState TestBlock) where + stowLedgerTables = error 
"unused: stowLedgerTables" + unstowLedgerTables = error "unused: unstowLedgerTables" + {------------------------------------------------------------------------------- Mempool support -------------------------------------------------------------------------------} @@ -127,27 +198,20 @@ txSize (TestBlockGenTx tx) = $ fromIntegral $ 1 + length (consumed tx) + length (produced tx) -mkTx :: - [Token] - -- ^ Consumed - -> [Token] - -- ^ Produced - -> Ledger.GenTx TestBlock -mkTx cons prod = TestBlockGenTx $ Tx { consumed = Set.fromList cons - , produced = Set.fromList prod - } - instance Ledger.LedgerSupportsMempool TestBlock where applyTx _cfg _shouldIntervene _slot (TestBlockGenTx tx) tickedSt = - except $ fmap (, ValidatedGenTx (TestBlockGenTx tx)) + except $ fmap ((, ValidatedGenTx (TestBlockGenTx tx)) . Ledger.forgetTrackingValues) $ applyDirectlyToPayloadDependentState tickedSt tx reapplyTx cfg slot (ValidatedGenTx genTx) tickedSt = - fst <$> Ledger.applyTx cfg Ledger.DoNotIntervene slot genTx tickedSt + Ledger.applyDiffs tickedSt . fst <$> Ledger.applyTx cfg Ledger.DoNotIntervene slot genTx tickedSt -- FIXME: it is ok to use 'DoNotIntervene' here? txForgetValidated (ValidatedGenTx tx) = tx + getTransactionKeySets (TestBlockGenTx tx) = LedgerTables $ + KeysMK $ consumed tx + instance Ledger.TxLimits TestBlock where type TxMeasure TestBlock = Ledger.IgnoringOverflow Ledger.ByteSize32 diff --git a/ouroboros-consensus/bench/mempool-bench/Main.hs b/ouroboros-consensus/bench/mempool-bench/Main.hs index 6b744b007a..a61c61058a 100644 --- a/ouroboros-consensus/bench/mempool-bench/Main.hs +++ b/ouroboros-consensus/bench/mempool-bench/Main.hs @@ -17,7 +17,6 @@ import Data.Aeson import qualified Data.ByteString.Lazy as BL import qualified Data.Csv as Csv import Data.Maybe (fromMaybe) -import Data.Set () import qualified Data.Text as Text import qualified Data.Text.Read as Text.Read import Main.Utf8 (withStdTerminalHandles) diff --git a/ouroboros-consensus/docs/haddocks/bogus.svg b/ouroboros-consensus/docs/haddocks/bogus.svg index e69de29bb2..9cd18e9861 100644 --- a/ouroboros-consensus/docs/haddocks/bogus.svg +++ b/ouroboros-consensus/docs/haddocks/bogus.svg @@ -0,0 +1,4 @@ +This file is a bogus file just to make 'cabal' happy as this directory is +included as 'extra-doc-files' and cabal will fail if this is empty. However, +this directory shall be populated by images used all over the +ouroboros-consensus documentation. 
diff --git a/ouroboros-consensus/ouroboros-consensus.cabal b/ouroboros-consensus/ouroboros-consensus.cabal index 774a787b19..592ce1fc7f 100644 --- a/ouroboros-consensus/ouroboros-consensus.cabal +++ b/ouroboros-consensus/ouroboros-consensus.cabal @@ -158,6 +158,13 @@ library Ouroboros.Consensus.Ledger.SupportsMempool Ouroboros.Consensus.Ledger.SupportsPeerSelection Ouroboros.Consensus.Ledger.SupportsProtocol + Ouroboros.Consensus.Ledger.Tables + Ouroboros.Consensus.Ledger.Tables.Basics + Ouroboros.Consensus.Ledger.Tables.Combinators + Ouroboros.Consensus.Ledger.Tables.Diff + Ouroboros.Consensus.Ledger.Tables.DiffSeq + Ouroboros.Consensus.Ledger.Tables.MapKind + Ouroboros.Consensus.Ledger.Tables.Utils Ouroboros.Consensus.Mempool Ouroboros.Consensus.Mempool.API Ouroboros.Consensus.Mempool.Capacity @@ -203,7 +210,6 @@ library Ouroboros.Consensus.Storage.ChainDB.Impl.ChainSel Ouroboros.Consensus.Storage.ChainDB.Impl.Follower Ouroboros.Consensus.Storage.ChainDB.Impl.Iterator - Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB Ouroboros.Consensus.Storage.ChainDB.Impl.Paths Ouroboros.Consensus.Storage.ChainDB.Impl.Query Ouroboros.Consensus.Storage.ChainDB.Impl.Types @@ -222,17 +228,39 @@ library Ouroboros.Consensus.Storage.ImmutableDB.Impl.Iterator Ouroboros.Consensus.Storage.ImmutableDB.Impl.Parser Ouroboros.Consensus.Storage.ImmutableDB.Impl.State + Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream Ouroboros.Consensus.Storage.ImmutableDB.Impl.Types Ouroboros.Consensus.Storage.ImmutableDB.Impl.Util Ouroboros.Consensus.Storage.ImmutableDB.Impl.Validation Ouroboros.Consensus.Storage.ImmutableDB.Stream Ouroboros.Consensus.Storage.LedgerDB - Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy - Ouroboros.Consensus.Storage.LedgerDB.Init - Ouroboros.Consensus.Storage.LedgerDB.LedgerDB - Ouroboros.Consensus.Storage.LedgerDB.Query - Ouroboros.Consensus.Storage.LedgerDB.Snapshots - Ouroboros.Consensus.Storage.LedgerDB.Update + Ouroboros.Consensus.Storage.LedgerDB.API + Ouroboros.Consensus.Storage.LedgerDB.API.Config + Ouroboros.Consensus.Storage.LedgerDB.Impl.Args + Ouroboros.Consensus.Storage.LedgerDB.Impl.Common + Ouroboros.Consensus.Storage.LedgerDB.Impl.Init + Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots + Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate + Ouroboros.Consensus.Storage.LedgerDB.V1.Args + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.InMemory + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Bridge + Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Status + Ouroboros.Consensus.Storage.LedgerDB.V1.Common + Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog + Ouroboros.Consensus.Storage.LedgerDB.V1.Flush + Ouroboros.Consensus.Storage.LedgerDB.V1.Forker + Ouroboros.Consensus.Storage.LedgerDB.V1.Init + Ouroboros.Consensus.Storage.LedgerDB.V1.Lock + Ouroboros.Consensus.Storage.LedgerDB.V1.Snapshots + Ouroboros.Consensus.Storage.LedgerDB.V2.Args + Ouroboros.Consensus.Storage.LedgerDB.V2.Common + Ouroboros.Consensus.Storage.LedgerDB.V2.InMemory + Ouroboros.Consensus.Storage.LedgerDB.V2.Init + Ouroboros.Consensus.Storage.LedgerDB.V2.LSM + Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq Ouroboros.Consensus.Storage.Serialisation Ouroboros.Consensus.Storage.VolatileDB Ouroboros.Consensus.Storage.VolatileDB.API @@ -279,6 +307,8 @@ library cardano-binary, cardano-crypto-class, 
cardano-ledger-core ^>=1.15, + cardano-lmdb >=0.4, + cardano-lmdb-simple >=0.7, cardano-prelude, cardano-slotting, cardano-strict-containers, @@ -286,11 +316,14 @@ library containers >=0.5 && <0.8, contra-tracer, deepseq, + diff-containers >=1.2, filelock, + fingertree-rm >=1.0, fs-api ^>=0.3, hashable, io-classes ^>=1.5, measures, + monoid-subclasses, mtl, nothunks ^>=0.2, ouroboros-network-api ^>=0.11, @@ -305,6 +338,7 @@ library semialign >=1.1, serialise ^>=0.2, si-timers ^>=1.5, + singletons, sop-core ^>=0.5, sop-extras ^>=0.2, streaming, @@ -316,6 +350,7 @@ library these ^>=1.2, time, transformers, + transformers-base, typed-protocols ^>=0.3, vector ^>=0.13, @@ -325,11 +360,17 @@ library directory latex-svg-image + build-depends: text >=1.2.5.0 && <2.2 + x-docspec-extra-packages: + directory + latex-svg-image + library unstable-consensus-testlib import: common-lib visibility: public hs-source-dirs: src/unstable-consensus-testlib exposed-modules: + Test.LedgerTables Test.Ouroboros.Consensus.ChainGenerator.Adversarial Test.Ouroboros.Consensus.ChainGenerator.BitVector Test.Ouroboros.Consensus.ChainGenerator.Counting @@ -350,6 +391,7 @@ library unstable-consensus-testlib Test.Util.HardFork.Future Test.Util.HardFork.OracularClock Test.Util.InvertedMap + Test.Util.LedgerStateOnlyTables Test.Util.LogicalClock Test.Util.MockChain Test.Util.Orphans.Arbitrary @@ -388,7 +430,7 @@ library unstable-consensus-testlib base16-bytestring, binary, bytestring, - cardano-binary:testlib, + cardano-binary:{cardano-binary, testlib}, cardano-crypto-class, cardano-prelude, cardano-slotting:testlib, @@ -433,6 +475,7 @@ library unstable-consensus-testlib template-haskell, text, time, + transformers-base, tree-diff, utf8-string, vector, @@ -527,9 +570,13 @@ test-suite consensus-test Test.Consensus.HardFork.Infra Test.Consensus.HardFork.Summary Test.Consensus.HeaderValidation + Test.Consensus.Ledger.Tables.Diff + Test.Consensus.Ledger.Tables.DiffSeq Test.Consensus.Mempool Test.Consensus.Mempool.Fairness Test.Consensus.Mempool.Fairness.TestBlock + Test.Consensus.Mempool.StateMachine + Test.Consensus.Mempool.Util Test.Consensus.MiniProtocol.BlockFetch.Client Test.Consensus.MiniProtocol.ChainSync.Client Test.Consensus.MiniProtocol.LocalStateQuery.Server @@ -544,22 +591,32 @@ test-suite consensus-test cardano-binary, cardano-crypto-class, cardano-crypto-tests, + cardano-ledger-core:testlib, cardano-slotting:{cardano-slotting, testlib}, + cardano-strict-containers, cborg, containers, contra-tracer, deepseq, + diff-containers, + fingertree-rm, fs-api ^>=0.3, + fs-sim, hashable, io-classes, io-sim, + measures, mtl, + nonempty-containers, nothunks, ouroboros-consensus, ouroboros-network, ouroboros-network-api, ouroboros-network-mock, ouroboros-network-protocols:{ouroboros-network-protocols, testlib}, + quickcheck-classes, + quickcheck-monoid-subclasses, + quickcheck-state-machine:no-vendored-treediff, quiet, random, resource-registry, @@ -574,6 +631,9 @@ test-suite consensus-test tasty-hunit, tasty-quickcheck, time, + transformers, + transformers-base, + tree-diff, typed-protocols ^>=0.3, typed-protocols-examples, typed-protocols-stateful, @@ -634,10 +694,16 @@ test-suite storage-test Test.Ouroboros.Storage.ImmutableDB.Primary Test.Ouroboros.Storage.ImmutableDB.StateMachine Test.Ouroboros.Storage.LedgerDB - Test.Ouroboros.Storage.LedgerDB.DiskPolicy - Test.Ouroboros.Storage.LedgerDB.InMemory - Test.Ouroboros.Storage.LedgerDB.OnDisk - Test.Ouroboros.Storage.LedgerDB.OrphanArbitrary + 
Test.Ouroboros.Storage.LedgerDB.Serialisation + Test.Ouroboros.Storage.LedgerDB.SnapshotPolicy + Test.Ouroboros.Storage.LedgerDB.StateMachine + Test.Ouroboros.Storage.LedgerDB.StateMachine.TestBlock + Test.Ouroboros.Storage.LedgerDB.V1.BackingStore + Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Lockstep + Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Mock + Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Registry + Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.QuickCheck + Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.Unit Test.Ouroboros.Storage.Orphans Test.Ouroboros.Storage.TestBlock Test.Ouroboros.Storage.VolatileDB @@ -647,15 +713,22 @@ test-suite storage-test build-depends: QuickCheck, + async, base, bifunctors, binary, bytestring, + cardano-binary, cardano-crypto-class, + cardano-ledger-binary:testlib, cardano-slotting:{cardano-slotting, testlib}, + cardano-strict-containers, cborg, + constraints, containers, contra-tracer, + diff-containers, + directory, fs-api ^>=0.3, fs-sim ^>=0.3, generics-sop, @@ -668,17 +741,23 @@ test-suite storage-test ouroboros-network-api, ouroboros-network-mock, pretty-show, + quickcheck-dynamic, + quickcheck-lockstep, quickcheck-state-machine:no-vendored-treediff ^>=0.10, random, resource-registry, serialise, + sop-core, + strict-mvar, strict-stm, tasty, tasty-hunit, tasty-quickcheck, + temporary, text, time, transformers, + transformers-base, tree-diff, unstable-consensus-testlib, vector, @@ -696,6 +775,7 @@ benchmark mempool-bench aeson, base, bytestring, + cardano-binary, cardano-slotting, cassava, containers, diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Block/Forging.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Block/Forging.hs index 03e5682d93..b5f6522913 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Block/Forging.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Block/Forging.hs @@ -137,11 +137,11 @@ data BlockForging m blk = BlockForging { -- PRECONDITION: 'checkCanForge' returned @Right ()@. 
, forgeBlock :: TopLevelConfig blk - -> BlockNo -- Current block number - -> SlotNo -- Current slot number - -> TickedLedgerState blk -- Current ledger state - -> [Validated (GenTx blk)] -- Transactions to include - -> IsLeader (BlockProtocol blk) -- Proof we are leader + -> BlockNo -- Current block number + -> SlotNo -- Current slot number + -> TickedLedgerState blk EmptyMK -- Current ledger state + -> [Validated (GenTx blk)] -- Transactions to include + -> IsLeader (BlockProtocol blk) -- Proof we are leader -> m blk } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/BlockchainTime/WallClock/HardFork.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/BlockchainTime/WallClock/HardFork.hs index 8d381f5af7..27aa27a745 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/BlockchainTime/WallClock/HardFork.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/BlockchainTime/WallClock/HardFork.hs @@ -46,7 +46,7 @@ newtype BackoffDelay = BackoffDelay NominalDiffTime data HardForkBlockchainTimeArgs m blk = HardForkBlockchainTimeArgs { hfbtBackoffDelay :: m BackoffDelay -- ^ See 'BackoffDelay' - , hfbtGetLedgerState :: STM m (LedgerState blk) + , hfbtGetLedgerState :: STM m (LedgerState blk EmptyMK) , hfbtLedgerConfig :: LedgerConfig blk , hfbtRegistry :: ResourceRegistry m , hfbtSystemTime :: SystemTime m @@ -95,7 +95,7 @@ hardForkBlockchainTime args = do , hfbtMaxClockRewind = maxClockRewind } = args - summarize :: LedgerState blk -> HF.Summary (HardForkIndices blk) + summarize :: LedgerState blk EmptyMK -> HF.Summary (HardForkIndices blk) summarize st = hardForkSummary cfg st loop :: HF.RunWithCachedSummary xs m diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Forecast.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Forecast.hs index 0da5f08223..2e6f4550b0 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Forecast.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Forecast.hs @@ -37,7 +37,7 @@ mapForecast f (Forecast at for) = Forecast{ -- 'GetTip'. -- -- Specialization of 'constantForecast'. 
-trivialForecast :: GetTip b => b -> Forecast () +trivialForecast :: GetTip b => b mk -> Forecast () trivialForecast x = constantForecastOf () (getTipSlot x) -- | Forecast where the values are never changing diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/Validated.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/Validated.hs index 21cde8fa3d..8dd45a3d55 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/Validated.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/Validated.hs @@ -1,6 +1,7 @@ -{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} @@ -14,12 +15,15 @@ module Ouroboros.Consensus.Fragment.Validated ( , validatedFragment , validatedLedger , validatedTip + -- * Monadic + , newM ) where import GHC.Stack import Ouroboros.Consensus.Block import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Util.Assert +import Ouroboros.Consensus.Util.IOLike hiding (invariant) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF @@ -32,16 +36,16 @@ data ValidatedFragment b l = UnsafeValidatedFragment { -- | Chain fragment validatedFragment :: !(AnchoredFragment b) - -- | Ledger after after validation + -- | Ledger after validation , validatedLedger :: !l } - deriving (Functor) + deriving (Functor, Foldable, Traversable) {-# COMPLETE ValidatedFragment #-} pattern ValidatedFragment :: (GetTip l, HasHeader b, HeaderHash b ~ HeaderHash l, HasCallStack) - => AnchoredFragment b -> l -> ValidatedFragment b l + => AnchoredFragment b -> l mk -> ValidatedFragment b (l mk) pattern ValidatedFragment f l <- UnsafeValidatedFragment f l where ValidatedFragment f l = new f l @@ -50,11 +54,19 @@ validatedTip :: HasHeader b => ValidatedFragment b l -> Point b validatedTip = AF.headPoint . validatedFragment invariant :: - forall l b. - (GetTip l, HasHeader b, HeaderHash b ~ HeaderHash l) - => ValidatedFragment b l + forall l mk b. + (GetTip l , HasHeader b, HeaderHash b ~ HeaderHash l) + => ValidatedFragment b (l mk) + -> Either String () +invariant (ValidatedFragment fragment ledger) = + pointInvariant (getTip ledger :: Point l) fragment + +pointInvariant :: + forall l b. (HeaderHash b ~ HeaderHash l, HasHeader b) + => Point l + -> AnchoredFragment b -> Either String () -invariant (ValidatedFragment fragment ledger) +pointInvariant ledgerTip0 fragment | ledgerTip /= headPoint = Left $ concat [ "ledger tip " @@ -66,19 +78,49 @@ invariant (ValidatedFragment fragment ledger) = Right () where ledgerTip, headPoint :: Point b - ledgerTip = castPoint $ getTip ledger + ledgerTip = castPoint ledgerTip0 headPoint = castPoint $ AF.headPoint fragment -- | Constructor for 'ValidatedFragment' that checks the invariant new :: - forall l b. + forall l mk b. 
(GetTip l, HasHeader b, HeaderHash b ~ HeaderHash l, HasCallStack) => AnchoredFragment b - -> l - -> ValidatedFragment b l + -> l mk + -> ValidatedFragment b (l mk) new fragment ledger = assertWithMsg (invariant validated) $ validated + where + validated :: ValidatedFragment b (l mk) + validated = UnsafeValidatedFragment { + validatedFragment = fragment + , validatedLedger = ledger + } + +{------------------------------------------------------------------------------- + Monadic +-------------------------------------------------------------------------------} + +invariantM :: + forall m l b. + (MonadSTM m, GetTipSTM m l, HasHeader b, HeaderHash b ~ HeaderHash l) + => ValidatedFragment b l + -> m (Either String ()) +invariantM (UnsafeValidatedFragment fragment ledger) = do + ledgerTip <- getTipM ledger + pure $ pointInvariant ledgerTip fragment + +-- | Constructor for 'ValidatedFragment' that checks the invariant +newM :: + forall m l b. + (MonadSTM m, GetTipSTM m l, HasHeader b, HeaderHash b ~ HeaderHash l, HasCallStack) + => AnchoredFragment b + -> l + -> m (ValidatedFragment b l) +newM fragment ledger = do + msg <- invariantM validated + pure $ assertWithMsg msg validated where validated :: ValidatedFragment b l validated = UnsafeValidatedFragment { diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/ValidatedDiff.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/ValidatedDiff.hs index 478a7a0385..b154c5c557 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/ValidatedDiff.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Fragment/ValidatedDiff.hs @@ -1,4 +1,5 @@ {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} @@ -14,6 +15,9 @@ module Ouroboros.Consensus.Fragment.ValidatedDiff ( , new , rollbackExceedsSuffix , toValidatedFragment + -- * Monadic + , newM + , toValidatedFragmentM ) where import Control.Monad.Except (throwError) @@ -25,6 +29,7 @@ import Ouroboros.Consensus.Fragment.Validated (ValidatedFragment) import qualified Ouroboros.Consensus.Fragment.Validated as VF import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Util.Assert +import Ouroboros.Consensus.Util.IOLike (MonadSTM (..)) -- | A 'ChainDiff' along with the ledger state after validation. -- @@ -49,18 +54,24 @@ pattern ValidatedChainDiff d l <- UnsafeValidatedChainDiff d l -- -- > getTip chainDiff == ledgerTipPoint ledger new :: - forall b l. - (GetTip l, HasHeader b, HeaderHash l ~ HeaderHash b, HasCallStack) + forall b l mk. (GetTip l, HasHeader b, HeaderHash l ~ HeaderHash b, HasCallStack) => ChainDiff b - -> l - -> ValidatedChainDiff b l + -> l mk + -> ValidatedChainDiff b (l mk) new chainDiff ledger = - assertWithMsg precondition $ + assertWithMsg (pointInvariant (getTip ledger) chainDiff) $ UnsafeValidatedChainDiff chainDiff ledger + +pointInvariant :: + forall l b. 
(HeaderHash b ~ HeaderHash l, HasHeader b) + => Point l + -> ChainDiff b + -> Either String () +pointInvariant ledgerTip0 chainDiff = precondition where chainDiffTip, ledgerTip :: Point b - chainDiffTip = Diff.getTip chainDiff - ledgerTip = castPoint $ getTip ledger + chainDiffTip = castPoint $ Diff.getTip chainDiff + ledgerTip = castPoint ledgerTip0 precondition | chainDiffTip == ledgerTip = return () @@ -71,10 +82,36 @@ new chainDiff ledger = toValidatedFragment :: (GetTip l, HasHeader b, HeaderHash l ~ HeaderHash b, HasCallStack) - => ValidatedChainDiff b l - -> ValidatedFragment b l + => ValidatedChainDiff b (l mk) + -> ValidatedFragment b (l mk) toValidatedFragment (UnsafeValidatedChainDiff cs l) = VF.ValidatedFragment (Diff.getSuffix cs) l rollbackExceedsSuffix :: HasHeader b => ValidatedChainDiff b l -> Bool rollbackExceedsSuffix = Diff.rollbackExceedsSuffix . getChainDiff + +{------------------------------------------------------------------------------- + Monadic +-------------------------------------------------------------------------------} + +newM :: + forall m b l. ( + MonadSTM m, GetTipSTM m l, HasHeader b, HeaderHash l ~ HeaderHash b + , HasCallStack + ) + => ChainDiff b + -> l + -> m (ValidatedChainDiff b l) +newM chainDiff ledger = do + ledgerTip <- getTipM ledger + pure $ assertWithMsg (pointInvariant ledgerTip chainDiff) + $ UnsafeValidatedChainDiff chainDiff ledger + +toValidatedFragmentM :: + ( MonadSTM m, GetTipSTM m l, HasHeader b, HeaderHash l ~ HeaderHash b + , HasCallStack + ) + => ValidatedChainDiff b l + -> m (ValidatedFragment b l) +toValidatedFragmentM (UnsafeValidatedChainDiff cs l) = + VF.newM (Diff.getSuffix cs) l diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs index a1cd17a3d1..bf88a2b1ca 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Genesis/Governor.hs @@ -54,6 +54,7 @@ import Ouroboros.Consensus.Config.SecurityParam import Ouroboros.Consensus.HardFork.Abstract (HasHardForkHistory (..)) import Ouroboros.Consensus.HardFork.History.Qry (qryFromExpr, runQuery, slotToGenesisWindow) +import Ouroboros.Consensus.Ledger.Basics (EmptyMK) import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState, ledgerState) import Ouroboros.Consensus.Ledger.SupportsProtocol @@ -152,7 +153,7 @@ data GDDStateView m blk peer = GDDStateView { -- | The current chain selection gddCtxCurChain :: AnchoredFragment (Header blk) -- | The current ledger state - , gddCtxImmutableLedgerSt :: ExtLedgerState blk + , gddCtxImmutableLedgerSt :: ExtLedgerState blk EmptyMK -- | Callbacks to disconnect from peers , gddCtxKillActions :: Map peer (m ()) , gddCtxStates :: Map peer (ChainSyncState blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Abstract.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Abstract.hs index a4e0c55f82..a2b6d069df 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Abstract.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Abstract.hs @@ -49,7 +49,7 @@ class HasHardForkHistory blk where -- information, and so this function becomes little more than a projection -- (indeed, in this case the 'LedgerState' should be irrelevant). 
hardForkSummary :: LedgerConfig blk - -> LedgerState blk + -> LedgerState blk mk -> HardFork.Summary (HardForkIndices blk) -- | Helper function that can be used to define 'hardForkSummary' @@ -62,7 +62,7 @@ class HasHardForkHistory blk where -- hard fork combinator). neverForksHardForkSummary :: (LedgerConfig blk -> HardFork.EraParams) -> LedgerConfig blk - -> LedgerState blk + -> LedgerState blk mk -> HardFork.Summary '[blk] neverForksHardForkSummary getParams cfg _st = HardFork.neverForksSummary eraEpochSize eraSlotLength eraGenesisWin diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Abstract/SingleEraBlock.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Abstract/SingleEraBlock.hs index ce8e9a1bf8..9265dbeafe 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Abstract/SingleEraBlock.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Abstract/SingleEraBlock.hs @@ -31,7 +31,6 @@ import Data.SOP.Match import Data.SOP.Strict import qualified Data.Text as Text import Data.Void -import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config.SupportsNode import Ouroboros.Consensus.HardFork.Combinator.Info @@ -68,6 +67,7 @@ class ( LedgerSupportsProtocol blk , NodeInitStorage blk , BlockSupportsDiffusionPipelining blk , BlockSupportsMetrics blk + , CanStowLedgerTables (LedgerState blk) -- Instances required to support testing , Eq (GenTx blk) , Eq (Validated (GenTx blk)) @@ -77,9 +77,6 @@ class ( LedgerSupportsProtocol blk , Show (CannotForge blk) , Show (ForgeStateInfo blk) , Show (ForgeStateUpdateError blk) - , Show (LedgerState blk) - , Eq (LedgerState blk) - , NoThunks (LedgerState blk) ) => SingleEraBlock blk where -- | Era transition @@ -93,7 +90,7 @@ class ( LedgerSupportsProtocol blk singleEraTransition :: PartialLedgerConfig blk -> EraParams -- ^ Current era parameters -> Bound -- ^ Start of this era - -> LedgerState blk + -> LedgerState blk mk -> Maybe EpochNo -- | Era information (for use in error messages) @@ -106,7 +103,7 @@ singleEraTransition' :: SingleEraBlock blk => WrapPartialLedgerConfig blk -> EraParams -> Bound - -> LedgerState blk -> Maybe EpochNo + -> LedgerState blk mk -> Maybe EpochNo singleEraTransition' = singleEraTransition . 
unwrapPartialLedgerConfig {------------------------------------------------------------------------------- diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Basics.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Basics.hs index e89589374c..9dc67081f1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Basics.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Basics.hs @@ -36,6 +36,7 @@ module Ouroboros.Consensus.HardFork.Combinator.Basics ( import Cardano.Slotting.EpochInfo import Data.Kind (Type) import Data.SOP.Constraint +import Data.SOP.Functors import Data.SOP.Strict import Data.Typeable import GHC.Generics (Generic) @@ -69,13 +70,16 @@ instance Typeable xs => ShowProxy (HardForkBlock xs) where type instance BlockProtocol (HardForkBlock xs) = HardForkProtocol xs type instance HeaderHash (HardForkBlock xs) = OneEraHash xs -newtype instance LedgerState (HardForkBlock xs) = HardForkLedgerState { - hardForkLedgerStatePerEra :: HardForkState LedgerState xs +newtype instance LedgerState (HardForkBlock xs) mk = HardForkLedgerState { + hardForkLedgerStatePerEra :: HardForkState (Flip LedgerState mk) xs } -deriving stock instance CanHardFork xs => Show (LedgerState (HardForkBlock xs)) -deriving stock instance CanHardFork xs => Eq (LedgerState (HardForkBlock xs)) -deriving newtype instance CanHardFork xs => NoThunks (LedgerState (HardForkBlock xs)) +deriving stock instance (ShowMK mk, CanHardFork xs) + => Show (LedgerState (HardForkBlock xs) mk) +deriving stock instance (EqMK mk, CanHardFork xs) + => Eq (LedgerState (HardForkBlock xs) mk) +deriving newtype instance (NoThunksMK mk, CanHardFork xs) + => NoThunks (LedgerState (HardForkBlock xs) mk) {------------------------------------------------------------------------------- Protocol config diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Compat.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Compat.hs index 48297fadc3..a2d77d836c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Compat.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Compat.hs @@ -3,6 +3,7 @@ {-# LANGUAGE KindSignatures #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeOperators #-} module Ouroboros.Consensus.HardFork.Combinator.Compat ( @@ -29,6 +30,7 @@ import Ouroboros.Consensus.HardFork.Combinator.Ledger.Query import qualified Ouroboros.Consensus.HardFork.History.Qry as Qry import Ouroboros.Consensus.HardFork.History.Summary (Bound, Summary, initBound, neverForksSummary) +import Ouroboros.Consensus.Ledger.Query {------------------------------------------------------------------------------- Query language @@ -36,19 +38,20 @@ import Ouroboros.Consensus.HardFork.History.Summary (Bound, Summary, -- | Version of @Query (HardForkBlock xs)@ without the restriction to have -- at least two eras -data HardForkCompatQuery blk :: Type -> Type where +type HardForkCompatQuery :: Type -> QueryFootprint -> Type -> Type +data HardForkCompatQuery blk fp result where CompatIfCurrent :: - BlockQuery blk result - -> HardForkCompatQuery blk result + BlockQuery blk fp result + -> HardForkCompatQuery blk fp result CompatAnytime :: QueryAnytime result -> EraIndex 
(HardForkIndices blk) - -> HardForkCompatQuery blk result + -> HardForkCompatQuery blk QFNoTables result CompatHardFork :: QueryHardFork (HardForkIndices blk) result - -> HardForkCompatQuery blk result + -> HardForkCompatQuery blk QFNoTables result {------------------------------------------------------------------------------- Convenience constructors for 'HardForkCompatQuery' -------------------------------------------------------------------------------} -- | Submit query to underlying ledger compatIfCurrent :: - BlockQuery blk result - -> HardForkCompatQuery blk result + BlockQuery blk fp result + -> HardForkCompatQuery blk fp result compatIfCurrent = CompatIfCurrent -- | Get the start of the specified era, if known compatGetEraStart :: EraIndex (HardForkIndices blk) - -> HardForkCompatQuery blk (Maybe Bound) + -> HardForkCompatQuery blk QFNoTables (Maybe Bound) compatGetEraStart = CompatAnytime GetEraStart -- | Get an interpreter for history queries -- -- I.e., this can be used for slot/epoch/time conversions. compatGetInterpreter :: - HardForkCompatQuery blk (Qry.Interpreter (HardForkIndices blk)) + HardForkCompatQuery blk QFNoTables (Qry.Interpreter (HardForkIndices blk)) compatGetInterpreter = CompatHardFork GetInterpreter {------------------------------------------------------------------------------- @@ -80,13 +83,13 @@ compatGetInterpreter = CompatHardFork GetInterpreter -- | Wrapper used when connecting to a server that's running the HFC with -- at least two eras forwardCompatQuery :: - forall m x xs. IsNonEmpty xs - => (forall result. BlockQuery (HardForkBlock (x ': xs)) result -> m result) + forall m x xs fp. IsNonEmpty xs + => (forall result. BlockQuery (HardForkBlock (x ': xs)) fp result -> m result) -- ^ Submit a query through the LocalStateQuery protocol. - -> (forall result. HardForkCompatQuery (HardForkBlock (x ': xs)) result -> m result) + -> (forall result. HardForkCompatQuery (HardForkBlock (x ': xs)) fp result -> m result) forwardCompatQuery f = go where - go :: HardForkCompatQuery (HardForkBlock (x ': xs)) result -> m result + go :: HardForkCompatQuery (HardForkBlock (x ': xs)) fp result -> m result go (CompatIfCurrent qry) = f qry go (CompatAnytime qry ix) = f (QueryAnytime qry ix) go (CompatHardFork qry) = f (QueryHardFork qry) @@ -94,16 +97,16 @@ forwardCompatQuery f = go -- | Wrapper used when connecting to a server that's not using the HFC, or -- is using the HFC but with a single era only. singleEraCompatQuery :: - forall m blk era. (Monad m, HardForkIndices blk ~ '[era]) + forall m blk era fp. (Monad m, HardForkIndices blk ~ '[era]) => EpochSize -> SlotLength -> GenesisWindow - -> (forall result. BlockQuery blk result -> m result) + -> (forall result. BlockQuery blk fp result -> m result) -- ^ Submit a query through the LocalStateQuery protocol. - -> (forall result. HardForkCompatQuery blk result -> m result) + -> (forall result. 
HardForkCompatQuery blk fp result -> m result) singleEraCompatQuery epochSize slotLen genesisWindow f = go where - go :: HardForkCompatQuery blk result -> m result + go :: HardForkCompatQuery blk fp result -> m result go (CompatIfCurrent qry) = f qry go (CompatAnytime qry ix) = const (goAnytime qry) (trivialIndex ix) go (CompatHardFork qry) = goHardFork qry @@ -113,7 +116,7 @@ singleEraCompatQuery epochSize slotLen genesisWindow f = go goHardFork :: QueryHardFork '[era] result -> m result goHardFork GetInterpreter = return $ Qry.mkInterpreter summary - goHardFork GetCurrentEra = return $ eraIndexZero + goHardFork GetCurrentEra = return eraIndexZero summary :: Summary '[era] summary = neverForksSummary epochSize slotLen genesisWindow diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Degenerate.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Degenerate.hs index fcb70b8ed6..289f92ee50 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Degenerate.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Degenerate.hs @@ -3,6 +3,7 @@ {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeOperators #-} @@ -31,6 +32,7 @@ module Ouroboros.Consensus.HardFork.Combinator.Degenerate ( , TxId (DegenGenTxId) ) where +import Data.SOP.Functors (Flip (..)) import Data.SOP.Strict import Ouroboros.Consensus.Block.Abstract import Ouroboros.Consensus.Config @@ -137,8 +139,8 @@ pattern DegenTipInfo x <- (project' (Proxy @(WrapTipInfo b)) -> x) pattern DegenQuery :: () => HardForkQueryResult '[b] result ~ a - => BlockQuery b result - -> BlockQuery (HardForkBlock '[b]) a + => BlockQuery b fp result + -> BlockQuery (HardForkBlock '[b]) fp a pattern DegenQuery x <- (projQuery' -> ProjHardForkQuery x) where DegenQuery x = injQuery x @@ -168,11 +170,11 @@ pattern DegenBlockConfig x <- (project -> x) pattern DegenLedgerState :: NoHardForks b - => LedgerState b - -> LedgerState (HardForkBlock '[b]) -pattern DegenLedgerState x <- (project -> x) + => LedgerState b mk + -> LedgerState (HardForkBlock '[b]) mk +pattern DegenLedgerState x <- (unFlip . project . 
Flip -> x) where - DegenLedgerState x = inject x + DegenLedgerState x = unFlip $ inject $ Flip x {------------------------------------------------------------------------------- Dealing with the config diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Binary.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Binary.hs index 72b916d933..97ab5adc7a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Binary.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Binary.hs @@ -9,6 +9,7 @@ module Ouroboros.Consensus.HardFork.Combinator.Embed.Binary (protocolInfoBinary) import Control.Exception (assert) import Data.Align (alignWith) import Data.SOP.Counting (exactlyTwo) +import Data.SOP.Functors (Flip (..)) import Data.SOP.OptNP (OptNP (..)) import Data.SOP.Strict (NP (..)) import Data.These (These (..)) @@ -83,7 +84,7 @@ protocolInfoBinary protocolInfo1 blockForging1 eraParams1 toPartialConsensusConf , pInfoInitLedger = ExtLedgerState { ledgerState = HardForkLedgerState $ - initHardForkState initLedgerState1 + initHardForkState (Flip initLedgerState1) , headerState = genesisHeaderState $ initHardForkState $ diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Nary.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Nary.hs index 6ed0df1002..249c7d08ca 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Nary.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Nary.hs @@ -24,6 +24,7 @@ import Data.Coerce (Coercible, coerce) import Data.SOP.BasicFunctors import Data.SOP.Counting (Exactly (..)) import Data.SOP.Dict (Dict (..)) +import Data.SOP.Functors (Flip (..)) import Data.SOP.Index import qualified Data.SOP.InPairs as InPairs import Data.SOP.Strict @@ -34,10 +35,12 @@ import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation (AnnTip, HeaderState (..), genesisHeaderState) +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState (..)) +import Ouroboros.Consensus.Ledger.Query +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.TypeFamilyWrappers -import Ouroboros.Consensus.Util ((.:)) {------------------------------------------------------------------------------- Injection for a single block into a HardForkBlock @@ -45,7 +48,7 @@ import Ouroboros.Consensus.Util ((.:)) class Inject f where inject :: - forall x xs. CanHardFork xs + forall x xs. (CanHardFork xs, HasCanonicalTxIn xs, HasHardForkTxOut xs) => Exactly xs History.Bound -- ^ Start bound of each era -> Index xs x @@ -56,6 +59,8 @@ inject' :: forall f a b x xs. ( Inject f , CanHardFork xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs , Coercible a (f x) , Coercible b (f (HardForkBlock xs)) ) @@ -76,10 +81,10 @@ injectNestedCtxt_ idx nc = case idx of IS idx' -> NCS (injectNestedCtxt_ idx' nc) injectQuery :: - forall x xs result. + forall x xs result fp. 
Index xs x - -> BlockQuery x result - -> QueryIfCurrent xs result + -> BlockQuery x fp result + -> QueryIfCurrent xs fp result injectQuery idx q = case idx of IZ -> QZ q IS idx' -> QS (injectQuery idx' q) @@ -142,15 +147,16 @@ instance Inject WrapApplyTxErr where (WrapApplyTxErr . HardForkApplyTxErrFromEra) .: injectNS' (Proxy @WrapApplyTxErr) -instance Inject (SomeSecond BlockQuery) where - inject _ idx (SomeSecond q) = SomeSecond (QueryIfCurrent (injectQuery idx q)) +instance Inject (SomeBlockQuery :.: BlockQuery) where + inject _ idx (Comp (SomeBlockQuery q)) = + Comp $ SomeBlockQuery $ QueryIfCurrent (injectQuery idx q) instance Inject AnnTip where inject _ = undistribAnnTip .: injectNS' (Proxy @AnnTip) -instance Inject LedgerState where +instance Inject (Flip LedgerState mk) where inject startBounds idx = - HardForkLedgerState . injectHardForkState startBounds idx + Flip . HardForkLedgerState . injectHardForkState startBounds idx instance Inject WrapChainDepState where inject startBounds idx = @@ -164,9 +170,9 @@ instance Inject HeaderState where $ WrapChainDepState headerStateChainDep } -instance Inject ExtLedgerState where - inject startBounds idx ExtLedgerState {..} = ExtLedgerState { - ledgerState = inject startBounds idx ledgerState +instance Inject (Flip ExtLedgerState mk) where + inject startBounds idx (Flip ExtLedgerState {..}) = Flip $ ExtLedgerState { + ledgerState = unFlip $ inject startBounds idx (Flip ledgerState) , headerState = inject startBounds idx headerState } @@ -185,10 +191,10 @@ instance Inject ExtLedgerState where -- problematic, but extending 'ledgerViewForecastAt' is a lot more subtle; see -- @forecastNotFinal@. injectInitialExtLedgerState :: - forall x xs. CanHardFork (x ': xs) + forall x xs. (CanHardFork (x ': xs), HasLedgerTables (LedgerState (HardForkBlock (x : xs)))) => TopLevelConfig (HardForkBlock (x ': xs)) - -> ExtLedgerState x - -> ExtLedgerState (HardForkBlock (x ': xs)) + -> ExtLedgerState x ValuesMK + -> ExtLedgerState (HardForkBlock (x ': xs)) ValuesMK injectInitialExtLedgerState cfg extLedgerState0 = ExtLedgerState { ledgerState = targetEraLedgerState @@ -203,15 +209,19 @@ injectInitialExtLedgerState cfg extLedgerState0 = (hardForkLedgerStatePerEra targetEraLedgerState)) cfg - targetEraLedgerState :: LedgerState (HardForkBlock (x ': xs)) - targetEraLedgerState = - HardForkLedgerState $ - -- We can immediately extend it to the right slot, executing any - -- scheduled hard forks in the first slot - State.extendToSlot - (configLedger cfg) - (SlotNo 0) - (initHardForkState (ledgerState extLedgerState0)) + targetEraLedgerState :: LedgerState (HardForkBlock (x ': xs)) ValuesMK + targetEraLedgerState = applyDiffs st st' + where + st :: LedgerState (HardForkBlock (x ': xs)) ValuesMK + st = HardForkLedgerState . initHardForkState . Flip . 
ledgerState $ extLedgerState0 + st' = HardForkLedgerState + -- We can immediately extend it to the right slot, executing any + -- scheduled hard forks in the first slot + (State.extendToSlot + (configLedger cfg) + (SlotNo 0) + (initHardForkState $ Flip $ forgetLedgerTables $ ledgerState extLedgerState0)) + firstEraChainDepState :: HardForkChainDepState (x ': xs) firstEraChainDepState = diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Unary.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Unary.hs index d182dbb22e..0855c4e2fb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Unary.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Embed/Unary.hs @@ -10,6 +10,7 @@ {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeOperators #-} @@ -38,6 +39,7 @@ import Data.Coerce import Data.Kind (Type) import Data.Proxy import Data.SOP.BasicFunctors +import Data.SOP.Functors import qualified Data.SOP.OptNP as OptNP import Data.SOP.Strict import qualified Data.SOP.Telescope as Telescope @@ -61,6 +63,7 @@ import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.Protocol.Abstract @@ -195,7 +198,7 @@ instance Isomorphic StorageConfig where project = defaultProjectNP inject = defaultInjectNP -instance Isomorphic LedgerState where +instance Isomorphic (Flip LedgerState mk) where project = defaultProjectSt inject = defaultInjectSt @@ -336,29 +339,29 @@ instance Isomorphic HeaderState where , headerStateChainDep = inject' (Proxy @(WrapChainDepState blk)) headerStateChainDep } -instance Isomorphic (Ticked :.: LedgerState) where +instance Isomorphic (FlipTickedLedgerState mk) where project = State.currentState . Telescope.fromTZ . getHardForkState . tickedHardForkLedgerStatePerEra - . unComp + . getFlipTickedLedgerState inject = - Comp + FlipTickedLedgerState . TickedHardForkLedgerState TransitionImpossible . HardForkState . Telescope.TZ . State.Current History.initBound -instance Isomorphic ExtLedgerState where - project ExtLedgerState{..} = ExtLedgerState { - ledgerState = project ledgerState +instance Isomorphic (Flip ExtLedgerState mk) where + project (Flip ExtLedgerState{..}) = Flip $ ExtLedgerState { + ledgerState = unFlip $ project $ Flip ledgerState , headerState = project headerState } - inject ExtLedgerState{..} = ExtLedgerState { - ledgerState = inject ledgerState + inject (Flip ExtLedgerState{..}) = Flip $ ExtLedgerState { + ledgerState = unFlip $ inject $ Flip ledgerState , headerState = inject headerState } @@ -371,11 +374,11 @@ instance Isomorphic AnnTip where instance Functor m => Isomorphic (InitChainDB m) where project :: forall blk. NoHardForks blk => InitChainDB m (HardForkBlock '[blk]) -> InitChainDB m blk - project = InitChainDB.map (inject' (Proxy @(I blk))) project + project = InitChainDB.map (inject' (Proxy @(I blk))) (unFlip . project . Flip) inject :: forall blk. 
NoHardForks blk => InitChainDB m blk -> InitChainDB m (HardForkBlock '[blk]) - inject = InitChainDB.map (project' (Proxy @(I blk))) inject + inject = InitChainDB.map (project' (Proxy @(I blk))) (unFlip . inject . Flip) instance Isomorphic ProtocolClientInfo where project ProtocolClientInfo{..} = ProtocolClientInfo { @@ -442,7 +445,7 @@ instance Functor m => Isomorphic (BlockForging m) where (inject cfg) bno sno - (unComp (inject (Comp tickedLgrSt))) + (getFlipTickedLedgerState (inject (FlipTickedLedgerState tickedLgrSt))) (inject' (Proxy @(WrapValidatedGenTx blk)) <$> txs) (inject' (Proxy @(WrapIsLeader blk)) isLeader) } @@ -485,7 +488,7 @@ instance Functor m => Isomorphic (BlockForging m) where (project cfg) bno sno - (unComp (project (Comp tickedLgrSt))) + (getFlipTickedLedgerState (project (FlipTickedLedgerState tickedLgrSt))) (project' (Proxy @(WrapValidatedGenTx blk)) <$> txs) (project' (Proxy @(WrapIsLeader blk)) isLeader) } @@ -504,14 +507,14 @@ instance Isomorphic ProtocolInfo where => ProtocolInfo (HardForkBlock '[blk]) -> ProtocolInfo blk project ProtocolInfo {..} = ProtocolInfo { pInfoConfig = project pInfoConfig - , pInfoInitLedger = project pInfoInitLedger + , pInfoInitLedger = unFlip $ project $ Flip pInfoInitLedger } inject :: forall blk. NoHardForks blk => ProtocolInfo blk -> ProtocolInfo (HardForkBlock '[blk]) inject ProtocolInfo {..} = ProtocolInfo { pInfoConfig = inject pInfoConfig - , pInfoInitLedger = inject pInfoInitLedger + , pInfoInitLedger = unFlip $ inject $ Flip pInfoInitLedger } {------------------------------------------------------------------------------- @@ -611,10 +614,10 @@ instance Isomorphic SerialisedHeader where -- | Project 'BlockQuery' -- -- Not an instance of 'Isomorphic' because the types change. -projQuery :: BlockQuery (HardForkBlock '[b]) result +projQuery :: BlockQuery (HardForkBlock '[b]) fp result -> (forall result'. (result :~: HardForkQueryResult '[b] result') - -> BlockQuery b result' + -> BlockQuery b fp result' -> a) -> a projQuery qry k = @@ -624,24 +627,25 @@ projQuery qry k = (\Refl prfNonEmpty _ _ -> case prfNonEmpty of {}) (\Refl prfNonEmpty _ -> case prfNonEmpty of {}) where - aux :: QueryIfCurrent '[b] result -> BlockQuery b result + aux :: QueryIfCurrent '[b] fp result -> BlockQuery b fp result aux (QZ q) = q aux (QS q) = case q of {} -projQuery' :: BlockQuery (HardForkBlock '[b]) result - -> ProjHardForkQuery b result +projQuery' :: BlockQuery (HardForkBlock '[b]) fp result + -> ProjHardForkQuery fp b result projQuery' qry = projQuery qry $ \Refl -> ProjHardForkQuery -data ProjHardForkQuery b :: Type -> Type where +type ProjHardForkQuery :: QueryFootprint -> Type -> Type -> Type +data ProjHardForkQuery fp b res where ProjHardForkQuery :: - BlockQuery b result' - -> ProjHardForkQuery b (HardForkQueryResult '[b] result') + BlockQuery b fp result' + -> ProjHardForkQuery fp b (HardForkQueryResult '[b] result') -- | Inject 'BlockQuery' -- -- Not an instance of 'Isomorphic' because the types change. -injQuery :: BlockQuery b result - -> BlockQuery (HardForkBlock '[b]) (HardForkQueryResult '[b] result) +injQuery :: forall fp b result. BlockQuery b fp result + -> BlockQuery (HardForkBlock '[b]) fp (HardForkQueryResult '[b] result) injQuery = QueryIfCurrent . 
QZ projQueryResult :: HardForkQueryResult '[b] result -> result diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Forging.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Forging.hs index 5aab6219d9..4ffcdc5037 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Forging.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Forging.hs @@ -33,13 +33,12 @@ import Ouroboros.Consensus.HardFork.Combinator.Abstract import Ouroboros.Consensus.HardFork.Combinator.AcrossEras import Ouroboros.Consensus.HardFork.Combinator.Basics import Ouroboros.Consensus.HardFork.Combinator.InjectTxs -import Ouroboros.Consensus.HardFork.Combinator.Ledger (Ticked (..)) +import Ouroboros.Consensus.HardFork.Combinator.Ledger import Ouroboros.Consensus.HardFork.Combinator.Mempool import Ouroboros.Consensus.HardFork.Combinator.Protocol import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.TypeFamilyWrappers -import Ouroboros.Consensus.Util ((.:)) -- | If we cannot forge, it's because the current era could not forge type HardForkCannotForge xs = OneEraCannotForge xs @@ -288,7 +287,7 @@ hardForkForgeBlock :: -> TopLevelConfig (HardForkBlock xs) -> BlockNo -> SlotNo - -> TickedLedgerState (HardForkBlock xs) + -> TickedLedgerState (HardForkBlock xs) EmptyMK -> [Validated (GenTx (HardForkBlock xs))] -> HardForkIsLeader xs -> m (HardForkBlock xs) @@ -355,7 +354,7 @@ hardForkForgeBlock blockForging -> Product (Product WrapIsLeader - (Ticked :.: LedgerState)) + (FlipTickedLedgerState EmptyMK)) ([] :.: WrapValidatedGenTx) blk -> m blk @@ -363,7 +362,7 @@ hardForkForgeBlock blockForging cfg' (Comp mBlockForging') (Pair - (Pair (WrapIsLeader isLeader') (Comp ledgerState')) + (Pair (WrapIsLeader isLeader') (FlipTickedLedgerState ledgerState')) (Comp txs')) = forgeBlock (fromMaybe diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/InjectTxs.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/InjectTxs.hs index e46d5f2c9c..5e27799889 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/InjectTxs.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/InjectTxs.hs @@ -1,4 +1,6 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE PatternSynonyms #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -9,8 +11,12 @@ module Ouroboros.Consensus.HardFork.Combinator.InjectTxs ( -- * Polymorphic InjectPolyTx (..) + , ListOfTxs (..) + , TelescopeWithTxList + , TxsWithOriginal (..) , cannotInjectPolyTx , matchPolyTx + , matchPolyTxs , matchPolyTxsNS -- * Unvalidated transactions , InjectTx @@ -26,11 +32,12 @@ module Ouroboros.Consensus.HardFork.Combinator.InjectTxs ( ) where import Data.Bifunctor +import Data.Either (partitionEithers) import Data.Functor.Product import Data.SOP.BasicFunctors +import Data.SOP.Constraint import Data.SOP.InPairs (InPairs (..)) import Data.SOP.Match -import Data.SOP.Sing import Data.SOP.Strict import Data.SOP.Telescope (Telescope (..)) import qualified Data.SOP.Telescope as Telescope @@ -94,6 +101,103 @@ matchPolyTx is tx = , currentState = Pair tx' currentState } +-- | A transaction coupled with its original version. 
+-- +-- We use this to keep the original hard fork transaction around, as otherwise +-- we would lose the index at which the transaction was originally, before +-- translations. +data TxsWithOriginal tx xs blk = + TxsWithOriginal { origTx :: !(NS tx xs) + , blkTx :: !(tx blk) + } + +-- | A partially applied list of tuples. +-- +-- In the end it represents @[(orig :: NS tx xs, t :: tx blk), ...]@ for some +-- @blk@. +newtype ListOfTxs tx xs blk = ListOfTxs { txsList :: [TxsWithOriginal tx xs blk] } + +-- | A special telescope. This type alias is used just for making this more +-- readable. +-- +-- This in the end is basically: +-- +-- > TS ... ( +-- > TZ ( +-- > [(orig, tx), ...] +-- > , f +-- > ) ...) +-- +-- So at the tip of the telescope, we have both an @f@ and a list of tuples of +-- transactions. +type TelescopeWithTxList g f tx xs' xs = + Telescope g (Product (ListOfTxs tx xs') f) xs + +matchPolyTxs' :: + All Top xs + => InPairs (InjectPolyTx tx) xs + -> [NS tx xs] + -> Telescope g f xs + -> ( [(NS tx xs, Mismatch tx f xs)] + , TelescopeWithTxList g f tx xs xs + ) +matchPolyTxs' ips txs = go ips [ hmap (TxsWithOriginal x) x | x <- txs ] + where + tipFst :: All Top xs => NS (TxsWithOriginal tx xs') xs -> NS tx xs' + tipFst = hcollapse . hmap (K . origTx) + + go :: All Top xs + => InPairs (InjectPolyTx tx) xs + -> [NS (TxsWithOriginal tx xs') xs] + -> Telescope g f xs + -> ( [(NS tx xs', Mismatch tx f xs)] + , TelescopeWithTxList g f tx xs' xs + ) + go _ txs' (TZ f) = + let (rejected, accepted) = + partitionEithers + $ map (\case + Z x -> Right x + -- The ones from later eras are invalid + S x -> Left (tipFst x, MR (hmap blkTx x) f) + ) txs' + in (rejected, TZ (Pair (ListOfTxs accepted) f)) + + go (PCons i is) txs' (TS g f) = + let (rejected, translated) = + partitionEithers + $ map (\case + Z (TxsWithOriginal origx x) -> + case injectTxWith i x of + -- The ones from this era that we cannot transport to + -- the next era are invalid + Nothing -> Left (origx, ML x (Telescope.tip f)) + Just x' -> Right $ Z (TxsWithOriginal origx x') + S x -> Right x + ) txs' + (nextRejected, nextState) = go is translated f + in (rejected ++ map (second MS) nextRejected, TS g nextState) + +matchPolyTxs :: + SListI xs + => InPairs (InjectPolyTx tx) xs + -> [NS tx xs] + -> HardForkState f xs + -> ( [(NS tx xs, Mismatch tx (Current f) xs)] + , HardForkState (Product (ListOfTxs tx xs) f) xs + ) +matchPolyTxs is tx = + fmap (HardForkState . hmap distrib) + . matchPolyTxs' is tx + . 
getHardForkState + where + distrib :: Product (ListOfTxs tx xs) (Current f) blk + -> Current (Product (ListOfTxs tx xs) f) blk + distrib (Pair x Current{..}) = Current { + currentStart = currentStart + , currentState = Pair x currentState + } + -- | Match transaction with an 'NS', attempting to inject where possible matchPolyTxNS :: InPairs (InjectPolyTx tx) xs diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger.hs index aaa001f0b8..518a46d0b7 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE ConstraintKinds #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} @@ -5,10 +6,14 @@ {-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} @@ -22,20 +27,41 @@ module Ouroboros.Consensus.HardFork.Combinator.Ledger ( , HardForkLedgerUpdate (..) , HardForkLedgerWarning (..) -- * Type family instances - , Ticked (..) + , FlipTickedLedgerState (..) + , Ticked1 (..) -- * Low-level API (exported for the benefit of testing) , AnnForecast (..) , mkHardForkForecast + -- * Ledger tables + , HardForkHasLedgerTables + , distribLedgerTables + , injectLedgerTables + -- ** HardForkTxIn + , HasCanonicalTxIn (..) + -- ** HardForkTxOut + , DefaultHardForkTxOut + , HasHardForkTxOut (..) + , distribHardForkTxOutDefault + , injectHardForkTxOutDefault + -- *** Serialisation + , SerializeHardForkTxOut (..) 
+ , decodeHardForkTxOutDefault + , encodeHardForkTxOutDefault ) where +import qualified Codec.CBOR.Decoding as CBOR +import qualified Codec.CBOR.Encoding as CBOR import Control.Monad (guard) import Control.Monad.Except (throwError, withExcept) import Data.Functor ((<&>)) import Data.Functor.Product +import Data.Kind (Type) +import Data.Maybe (fromMaybe) import Data.Proxy import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Counting (getExactly) +import Data.SOP.Functors (Flip (..)) import Data.SOP.Index import Data.SOP.InPairs (InPairs (..)) import qualified Data.SOP.InPairs as InPairs @@ -43,6 +69,8 @@ import qualified Data.SOP.Match as Match import Data.SOP.Strict import Data.SOP.Telescope (Telescope (..)) import qualified Data.SOP.Telescope as Telescope +import Data.Void +import Data.Word (Word8) import GHC.Generics (Generic) import NoThunks.Class (NoThunks (..)) import Ouroboros.Consensus.Block @@ -67,9 +95,18 @@ import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Ticked import Ouroboros.Consensus.TypeFamilyWrappers import Ouroboros.Consensus.Util.Condense +-- $setup +-- >>> import Image.LaTeX.Render +-- >>> import Control.Monad +-- >>> import System.Directory +-- >>> +-- >>> createDirectoryIfMissing True "docs/haddocks/" + {------------------------------------------------------------------------------- Errors -------------------------------------------------------------------------------} @@ -88,29 +125,28 @@ data HardForkLedgerError xs = instance CanHardFork xs => GetTip (LedgerState (HardForkBlock xs)) where getTip = castPoint - . State.getTip (castPoint . getTip) + . State.getTip (castPoint . getTip . unFlip) . hardForkLedgerStatePerEra -instance CanHardFork xs => GetTip (Ticked (LedgerState (HardForkBlock xs))) where +instance CanHardFork xs => GetTip (Ticked1 (LedgerState (HardForkBlock xs))) where getTip = castPoint - . State.getTip (castPoint . getTip . unComp) + . State.getTip (castPoint . getTip . getFlipTickedLedgerState) . 
tickedHardForkLedgerStatePerEra {------------------------------------------------------------------------------- Ticking -------------------------------------------------------------------------------} -data instance Ticked (LedgerState (HardForkBlock xs)) = +newtype FlipTickedLedgerState mk blk = FlipTickedLedgerState { + getFlipTickedLedgerState :: Ticked1 (LedgerState blk) mk + } + +data instance Ticked1 (LedgerState (HardForkBlock xs)) mk = TickedHardForkLedgerState { tickedHardForkLedgerStateTransition :: !TransitionInfo , tickedHardForkLedgerStatePerEra :: - !(HardForkState (Ticked :.: LedgerState) xs) + !(HardForkState (FlipTickedLedgerState mk) xs) } - deriving (Generic) - -deriving anyclass instance - CanHardFork xs - => NoThunks (Ticked (LedgerState (HardForkBlock xs))) instance CanHardFork xs => IsLedger (LedgerState (HardForkBlock xs)) where type LedgerErr (LedgerState (HardForkBlock xs)) = HardForkLedgerError xs @@ -149,27 +185,38 @@ instance CanHardFork xs => IsLedger (LedgerState (HardForkBlock xs)) where cfgs = getPerEraLedgerConfig hardForkLedgerConfigPerEra ei = State.epochInfoLedger cfg st - extended :: HardForkState LedgerState xs + extended :: HardForkState (Flip LedgerState DiffMK) xs extended = State.extendToSlot cfg slot st tickOne :: SingleEraBlock blk => EpochInfo (Except PastHorizonException) -> SlotNo - -> Index xs blk - -> WrapPartialLedgerConfig blk - -> LedgerState blk - -> ( LedgerResult (LedgerState (HardForkBlock xs)) - :.: (Ticked :.: LedgerState) - ) blk -tickOne ei slot index pcfg st = Comp $ fmap Comp $ - embedLedgerResult (injectLedgerEvent index) - $ applyChainTickLedgerResult (completeLedgerConfig' ei pcfg) slot st + -> Index xs blk + -> WrapPartialLedgerConfig blk + -> (Flip LedgerState DiffMK) blk + -> ( LedgerResult (LedgerState (HardForkBlock xs)) + :.: FlipTickedLedgerState DiffMK + ) blk +tickOne ei slot sopIdx partialCfg st = + Comp + . fmap ( FlipTickedLedgerState + . prependDiffs (unFlip st) + ) + . embedLedgerResult (injectLedgerEvent sopIdx) + . applyChainTickLedgerResult (completeLedgerConfig' ei partialCfg) slot + . forgetLedgerTables + . unFlip + $ st {------------------------------------------------------------------------------- ApplyBlock -------------------------------------------------------------------------------} -instance CanHardFork xs +instance ( CanHardFork xs + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => ApplyBlock (LedgerState (HardForkBlock xs)) (HardForkBlock xs) where applyBlockLedgerResult cfg @@ -212,29 +259,41 @@ instance CanHardFork xs transition st + getBlockKeySets (HardForkBlock (OneEraBlock ns)) = + hcollapse + $ hcimap proxySingle f ns + where + f :: + SingleEraBlock x + => Index xs x + -> I x + -> K (LedgerTables (LedgerState (HardForkBlock xs)) KeysMK) x + f idx (I blk) = K $ injectLedgerTables idx $ getBlockKeySets blk + apply :: SingleEraBlock blk => Index xs blk -> WrapLedgerConfig blk - -> Product I (Ticked :.: LedgerState) blk + -> Product I (FlipTickedLedgerState ValuesMK) blk -> ( Except (HardForkLedgerError xs) :.: LedgerResult (LedgerState (HardForkBlock xs)) - :.: LedgerState + :.: Flip LedgerState DiffMK ) blk -apply index (WrapLedgerConfig cfg) (Pair (I block) (Comp st)) = +apply index (WrapLedgerConfig cfg) (Pair (I block) (FlipTickedLedgerState st)) = Comp $ withExcept (injectLedgerError index) - $ fmap (Comp . embedLedgerResult (injectLedgerEvent index)) + $ fmap (Comp . fmap Flip . 
embedLedgerResult (injectLedgerEvent index)) $ applyBlockLedgerResult cfg block st reapply :: SingleEraBlock blk => Index xs blk -> WrapLedgerConfig blk - -> Product I (Ticked :.: LedgerState) blk + -> Product I (FlipTickedLedgerState ValuesMK) blk -> ( LedgerResult (LedgerState (HardForkBlock xs)) - :.: LedgerState + :.: Flip LedgerState DiffMK ) blk -reapply index (WrapLedgerConfig cfg) (Pair (I block) (Comp st)) = +reapply index (WrapLedgerConfig cfg) (Pair (I block) (FlipTickedLedgerState st)) = Comp + $ fmap Flip $ embedLedgerResult (injectLedgerEvent index) $ reapplyBlockLedgerResult cfg block st @@ -242,7 +301,11 @@ reapply index (WrapLedgerConfig cfg) (Pair (I block) (Comp st)) = UpdateLedger -------------------------------------------------------------------------------} -instance CanHardFork xs => UpdateLedger (HardForkBlock xs) +instance ( CanHardFork xs + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => UpdateLedger (HardForkBlock xs) {------------------------------------------------------------------------------- HasHardForkHistory @@ -311,7 +374,11 @@ instance CanHardFork xs => ValidateEnvelope (HardForkBlock xs) where LedgerSupportsProtocol -------------------------------------------------------------------------------} -instance CanHardFork xs => LedgerSupportsProtocol (HardForkBlock xs) where +instance ( CanHardFork xs + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => LedgerSupportsProtocol (HardForkBlock xs) where protocolLedgerView HardForkLedgerConfig{..} (TickedHardForkLedgerState transition ticked) = HardForkLedgerView { @@ -328,9 +395,9 @@ instance CanHardFork xs => LedgerSupportsProtocol (HardForkBlock xs) where viewOne :: SingleEraBlock blk => WrapPartialLedgerConfig blk - -> (Ticked :.: LedgerState) blk + -> FlipTickedLedgerState mk blk -> WrapLedgerView blk - viewOne cfg (Comp st) = + viewOne cfg (FlipTickedLedgerState st) = WrapLedgerView $ protocolLedgerView (completeLedgerConfig' ei cfg) st @@ -354,17 +421,17 @@ instance CanHardFork xs => LedgerSupportsProtocol (HardForkBlock xs) where (getHardForkState ledgerSt) forecastOne :: - forall blk. SingleEraBlock blk + forall blk mk. 
SingleEraBlock blk => WrapPartialLedgerConfig blk -> K EraParams blk - -> Current LedgerState blk + -> Current (Flip LedgerState mk) blk -> Current (AnnForecast LedgerState WrapLedgerView) blk - forecastOne cfg (K params) (Current start st) = Current { + forecastOne cfg (K params) (Current start (Flip st)) = Current { currentStart = start , currentState = AnnForecast { annForecast = mapForecast WrapLedgerView $ ledgerViewForecastAt cfg' st - , annForecastState = st + , annForecastState = forgetLedgerTables st , annForecastTip = ledgerTipSlot st , annForecastEnd = History.mkUpperBound params start <$> singleEraTransition' cfg params start st @@ -381,7 +448,7 @@ instance CanHardFork xs => LedgerSupportsProtocol (HardForkBlock xs) where -- | Forecast annotated with details about the ledger it was derived from data AnnForecast state view blk = AnnForecast { annForecast :: Forecast (view blk) - , annForecastState :: state blk + , annForecastState :: state blk EmptyMK , annForecastTip :: WithOrigin SlotNo , annForecastEnd :: Maybe Bound } @@ -565,8 +632,8 @@ inspectHardForkLedger :: => NP WrapPartialLedgerConfig xs -> NP (K EraParams) xs -> NP TopLevelConfig xs - -> NS (Current LedgerState) xs - -> NS (Current LedgerState) xs + -> NS (Current (Flip LedgerState mk1)) xs + -> NS (Current (Flip LedgerState mk2)) xs -> [LedgerEvent (HardForkBlock xs)] inspectHardForkLedger = go where @@ -574,13 +641,16 @@ inspectHardForkLedger = go => NP WrapPartialLedgerConfig xs -> NP (K EraParams) xs -> NP TopLevelConfig xs - -> NS (Current LedgerState) xs - -> NS (Current LedgerState) xs + -> NS (Current (Flip LedgerState mk1)) xs + -> NS (Current (Flip LedgerState mk2)) xs -> [LedgerEvent (HardForkBlock xs)] go (pc :* _) (K ps :* pss) (c :* _) (Z before) (Z after) = concat [ map liftEvent $ - inspectLedger c (currentState before) (currentState after) + inspectLedger + c + (unFlip $ currentState before) + (unFlip $ currentState after) , case (pss, confirmedBefore, confirmedAfter) of (_, Nothing, Nothing) -> @@ -627,12 +697,12 @@ inspectHardForkLedger = go (unwrapPartialLedgerConfig pc) ps (currentStart before) - (currentState before) + (unFlip $ currentState before) confirmedAfter = singleEraTransition (unwrapPartialLedgerConfig pc) ps (currentStart after) - (currentState after) + (unFlip $ currentState after) go Nil _ _ before _ = case before of {} @@ -735,8 +805,8 @@ shiftUpdate = go Auxiliary -------------------------------------------------------------------------------} -ledgerInfo :: forall blk. SingleEraBlock blk - => Current (Ticked :.: LedgerState) blk -> LedgerEraInfo blk +ledgerInfo :: forall blk mk. SingleEraBlock blk + => Current (FlipTickedLedgerState mk) blk -> LedgerEraInfo blk ledgerInfo _ = LedgerEraInfo $ singleEraInfo (Proxy @blk) ledgerViewInfo :: forall blk f. SingleEraBlock blk @@ -755,3 +825,423 @@ injectLedgerEvent index = OneEraLedgerEvent . injectNS index . 
WrapLedgerEvent + +{------------------------------------------------------------------------------- + Ledger Tables for the Nary HardForkBlock +-------------------------------------------------------------------------------} + +type HardForkHasLedgerTables :: [Type] -> Constraint +type HardForkHasLedgerTables xs = ( + All (Compose HasLedgerTables LedgerState) xs + , All (Compose HasTickedLedgerTables LedgerState) xs + , All (Compose Eq WrapTxOut) xs + , All (Compose Show WrapTxOut) xs + , All (Compose NoThunks WrapTxOut) xs + , Show (CanonicalTxIn xs) + , Ord (CanonicalTxIn xs) + , NoThunks (CanonicalTxIn xs) + , Eq (HardForkTxOut xs) + , Show (HardForkTxOut xs) + , NoThunks (HardForkTxOut xs) + ) + +-- | The Ledger and Consensus team discussed the fact that we need to be able +-- to reach the TxIn key for an entry from any era, regardless of the era in +-- which it was created, therefore we need to have a "canonical" +-- serialization that doesn't change between eras. For now we are using +-- @'toEraCBOR' \@('ShelleyEra' c)@ as a stop-gap, but Ledger will provide a +-- serialization function into something more efficient. +instance ( HasCanonicalTxIn xs + , SerializeHardForkTxOut xs + ) => CanSerializeLedgerTables (LedgerState (HardForkBlock xs)) where + codecLedgerTables = LedgerTables $ + CodecMK + encodeCanonicalTxIn + (encodeHardForkTxOut (Proxy @xs)) + decodeCanonicalTxIn + (decodeHardForkTxOut (Proxy @xs)) + +-- | Warning: 'projectLedgerTables' and 'withLedgerTables' are prohibitively +-- expensive when using big tables or when used multiple times. See the 'Value' +-- instance for the 'HardForkBlock' for more information. +instance ( HardForkHasLedgerTables xs + , CanHardFork xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => HasLedgerTables (LedgerState (HardForkBlock xs)) where + projectLedgerTables :: + forall mk. (CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => LedgerState (HardForkBlock xs) mk + -> LedgerTables (LedgerState (HardForkBlock xs)) mk + projectLedgerTables (HardForkLedgerState st) = hcollapse $ + hcimap (Proxy @(Compose HasLedgerTables LedgerState)) projectOne st + where + projectOne :: + Compose HasLedgerTables LedgerState x + => Index xs x + -> Flip LedgerState mk x + -> K (LedgerTables (LedgerState (HardForkBlock xs)) mk) x + projectOne i l = + K + $ injectLedgerTables i + $ projectLedgerTables + $ unFlip l + + withLedgerTables :: + forall mk any. (CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => LedgerState (HardForkBlock xs) any + -> LedgerTables (LedgerState (HardForkBlock xs)) mk + -> LedgerState (HardForkBlock xs) mk + withLedgerTables (HardForkLedgerState st) tables = HardForkLedgerState $ + hcimap (Proxy @(Compose HasLedgerTables LedgerState)) withLedgerTablesOne st + where + withLedgerTablesOne :: + Compose HasLedgerTables LedgerState x + => Index xs x + -> Flip LedgerState any x + -> Flip LedgerState mk x + withLedgerTablesOne i l = + Flip + $ withLedgerTables (unFlip l) + $ distribLedgerTables i tables + +instance ( HardForkHasLedgerTables xs + , CanHardFork xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => HasLedgerTables (Ticked1 (LedgerState (HardForkBlock xs))) where + projectLedgerTables :: + forall mk. 
(CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => Ticked1 (LedgerState (HardForkBlock xs)) mk + -> LedgerTables (Ticked1 (LedgerState (HardForkBlock xs))) mk + projectLedgerTables st = hcollapse $ + hcimap + (Proxy @(Compose HasTickedLedgerTables LedgerState)) + projectOne + (tickedHardForkLedgerStatePerEra st) + where + projectOne :: + Compose HasTickedLedgerTables LedgerState x + => Index xs x + -> FlipTickedLedgerState mk x + -> K (LedgerTables (Ticked1 (LedgerState (HardForkBlock xs))) mk) x + projectOne i l = + K + $ castLedgerTables + $ injectLedgerTables i + $ castLedgerTables + $ projectLedgerTables + $ getFlipTickedLedgerState l + + withLedgerTables :: + forall mk any. (CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => Ticked1 (LedgerState (HardForkBlock xs)) any + -> LedgerTables (Ticked1 (LedgerState (HardForkBlock xs))) mk + -> Ticked1 (LedgerState (HardForkBlock xs)) mk + withLedgerTables st tables = st { + tickedHardForkLedgerStatePerEra = + hcimap + (Proxy @(Compose HasTickedLedgerTables LedgerState)) + withLedgerTablesOne + (tickedHardForkLedgerStatePerEra st) + } + where + withLedgerTablesOne :: + Compose HasTickedLedgerTables LedgerState x + => Index xs x + -> FlipTickedLedgerState any x + -> FlipTickedLedgerState mk x + withLedgerTablesOne i l = + FlipTickedLedgerState + $ withLedgerTables (getFlipTickedLedgerState l) + $ castLedgerTables + $ distribLedgerTables i (castLedgerTables tables) + +instance ( Key (LedgerState (HardForkBlock xs)) ~ Void + , Value (LedgerState (HardForkBlock xs)) ~ Void + , All (Compose LedgerTablesAreTrivial LedgerState) xs + ) => LedgerTablesAreTrivial (LedgerState (HardForkBlock xs)) where + convertMapKind (HardForkLedgerState st) = HardForkLedgerState $ + hcmap (Proxy @(Compose LedgerTablesAreTrivial LedgerState)) (Flip . convertMapKind . unFlip) st + +instance All (Compose CanStowLedgerTables LedgerState) xs + => CanStowLedgerTables (LedgerState (HardForkBlock xs)) where + stowLedgerTables :: + LedgerState (HardForkBlock xs) ValuesMK + -> LedgerState (HardForkBlock xs) EmptyMK + stowLedgerTables (HardForkLedgerState st) = HardForkLedgerState $ + hcmap (Proxy @(Compose CanStowLedgerTables LedgerState)) stowOne st + where + stowOne :: + Compose CanStowLedgerTables LedgerState x + => Flip LedgerState ValuesMK x + -> Flip LedgerState EmptyMK x + stowOne = Flip . stowLedgerTables . unFlip + + unstowLedgerTables :: + LedgerState (HardForkBlock xs) EmptyMK + -> LedgerState (HardForkBlock xs) ValuesMK + unstowLedgerTables (HardForkLedgerState st) = HardForkLedgerState $ + hcmap (Proxy @(Compose CanStowLedgerTables LedgerState)) unstowOne st + where + unstowOne :: + Compose CanStowLedgerTables LedgerState x + => Flip LedgerState EmptyMK x + -> Flip LedgerState ValuesMK x + unstowOne = Flip . unstowLedgerTables . unFlip + +injectLedgerTables :: + forall xs x mk. ( + CanMapKeysMK mk + , CanMapMK mk + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) + => Index xs x + -> LedgerTables (LedgerState x ) mk + -> LedgerTables (LedgerState (HardForkBlock xs)) mk +injectLedgerTables idx = + LedgerTables + . mapKeysMK injTxIn + . mapMK injTxOut + . getLedgerTables + where + injTxIn :: Key (LedgerState x) -> Key (LedgerState (HardForkBlock xs)) + injTxIn = injectCanonicalTxIn idx + + injTxOut :: Value (LedgerState x) -> Value (LedgerState (HardForkBlock xs)) + injTxOut = injectHardForkTxOut idx + +distribLedgerTables :: + forall xs x mk. 
( + CanMapKeysMK mk + , Ord (Key (LedgerState x)) + , HasCanonicalTxIn xs + , CanMapMK mk + , HasHardForkTxOut xs + ) + => Index xs x + -> LedgerTables (LedgerState (HardForkBlock xs)) mk + -> LedgerTables (LedgerState x ) mk +distribLedgerTables idx = + LedgerTables + . mapKeysMK (distribCanonicalTxIn idx) + . mapMK (distribHardForkTxOut idx) + . getLedgerTables + +{------------------------------------------------------------------------------- + HardForkTxIn +-------------------------------------------------------------------------------} + +-- | Defaults to a 'CanonicalTxIn' type, but this will probably change in the +-- future to @NS 'WrapTxIn' xs@. See 'HasCanonicalTxIn'. +type instance Key (LedgerState (HardForkBlock xs)) = CanonicalTxIn xs + +-- | Canonical TxIn +-- +-- The Ledger and Consensus team discussed the fact that we need to be able to +-- reach the TxIn key for an entry from any era, regardless of the era in which +-- it was created, therefore we need to have a "canonical" serialization that +-- doesn't change between eras. For now we are requiring that a 'HardForkBlock' +-- has only one associated 'TxIn' type as a stop-gap, but Ledger will provide a +-- serialization function into something more efficient. +type HasCanonicalTxIn :: [Type] -> Constraint +class ( Show (CanonicalTxIn xs) + , Ord (CanonicalTxIn xs) + , NoThunks (CanonicalTxIn xs) + ) => HasCanonicalTxIn xs where + data family CanonicalTxIn (xs :: [Type]) :: Type + + -- | Inject an era-specific 'TxIn' into a 'TxIn' for a 'HardForkBlock'. + injectCanonicalTxIn :: + Index xs x -> + Key (LedgerState x) -> + CanonicalTxIn xs + + -- | Distribute a 'TxIn' for a 'HardForkBlock' to an era-specific 'TxIn'. + distribCanonicalTxIn :: + Index xs x -> + CanonicalTxIn xs -> + Key (LedgerState x) + + encodeCanonicalTxIn :: CanonicalTxIn xs -> CBOR.Encoding + + decodeCanonicalTxIn :: forall s. CBOR.Decoder s (CanonicalTxIn xs) + +{------------------------------------------------------------------------------- + HardForkTxOut +-------------------------------------------------------------------------------} + +-- | Defaults to the 'HardForkTxOut' type +type instance Value (LedgerState (HardForkBlock xs)) = HardForkTxOut xs + +-- | This choice for 'HardForkTxOut' imposes some complications on the code. +-- +-- We deliberately chose not to have all values in the tables be +-- @'Cardano.Ledger.Core.TxOut' era@ because this would require us to traverse +-- and translate the whole UTxO set on era boundaries. To avoid this, we are +-- holding a @'NS' 'WrapTxOut' xs@ instead. +-- +-- Whenever we are carrying a @'LedgerState' ('HardForkBlock' xs) mk@ (or +-- 'Ouroboros.Consensus.Ledger.Extended.ExtLedgerState'), the implied tables are +-- the ones inside the particular ledger state in the 'Telescope' of the +-- 'HardForkState'. +-- +-- <> +-- +-- However, when we are carrying @'LedgerTables' ('HardForkBlock' xs) mk@ we are +-- instead carrying these tables, where the 'Value' is an 'NS'. This means that +-- whenever we are extracting these tables, we are effectively duplicating the +-- UTxO set ('Data.Map.Map') inside, to create an identical one where every +-- element has been translated to the most recent era and unwrapped from the +-- 'NS'. +-- +-- <> +-- +-- To prevent memory explosion, try to only perform one of these transformations, +-- for example: +-- +-- * when applying blocks, inject the tables for the transactions only once, and +-- extract them only once.
+-- +-- * when performing queries on the tables (that use +-- 'Ouroboros.Consensus.Ledger.Query.QFTraverseTables'), operate with the +-- tables at the hard fork level until the very end, when you have to +-- promote them to some specific era. +-- +-- = __(image code)__ +-- +-- >>> :{ +-- >>> either (error . show) pure =<< +-- >>> renderToFile "docs/haddocks/hard-fork-tables.svg" defaultEnv (tikz ["positioning", "arrows"]) "\\node at (4.5,4.8) {\\small{LedgerTables (LedgerState (HardForkBlock xs))}};\ +-- >>> \ \\draw (0,0) rectangle (9,5);\ +-- >>> \ \\node (rect) at (1.5,4) [draw,minimum width=1cm,minimum height=0.5cm] {TxIn};\ +-- >>> \ \\node (oneOf) at (3.5,4) [draw=none] {NS};\ +-- >>> \ \\draw (rect) -> (oneOf);\ +-- >>> \ \\node (sh) at (6.5,4) [draw,minimum width=1cm,minimum height=0.5cm] {BlockATxOut};\ +-- >>> \ \\node (al) at (6.5,3) [draw,minimum width=1cm,minimum height=0.5cm] {BlockBTxOut};\ +-- >>> \ \\node (my) at (6.5,2) [draw=none,minimum width=1cm,minimum height=0.5cm] {...};\ +-- >>> \ \\node (ba) at (6.5,1) [draw,minimum width=1cm,minimum height=0.5cm] {BlockNTxOut};\ +-- >>> \ \\draw (oneOf) -> (sh);\ +-- >>> \ \\draw (oneOf) -> (al);\ +-- >>> \ \\draw (oneOf) -> (ba);\ +-- >>> \ \\draw (3,0.5) rectangle (8,4.5);" +-- >>> :} +-- +-- >>> :{ +-- >>> either (error . show) pure =<< +-- >>> renderToFile "docs/haddocks/hard-fork-tables-per-block.svg" defaultEnv (tikz ["positioning", "arrows"]) "\\node at (5,4.8) {\\small{LedgerState (HardForkBlock xs)}};\ +-- >>> \ \\draw (0,0) rectangle (10,5);\ +-- >>> \ \\node (oneOf2) at (2,4) [draw=none] {HardForkState};\ +-- >>> \ \\node (bb) at (5,4) [draw,minimum width=1cm,minimum height=0.5cm] {BlockAState};\ +-- >>> \ \\node (bt) at (8,4) [draw,minimum width=1cm,minimum height=0.5cm] {BlockATables};\ +-- >>> \ \\node (sb) at (5,3) [draw,minimum width=1cm,minimum height=0.5cm] {BlockBState};\ +-- >>> \ \\node (st) at (8,3) [draw,minimum width=1cm,minimum height=0.5cm] {BlockBTables};\ +-- >>> \ \\node (db) at (5,2) [draw=none,minimum width=1cm,minimum height=0.5cm] {...};\ +-- >>> \ \\node (dt) at (8,2) [draw=none,minimum width=1cm,minimum height=0.5cm] {...};\ +-- >>> \ \\node (bab) at (5,1) [draw,minimum width=1cm,minimum height=0.5cm] {BlockNState};\ +-- >>> \ \\node (bat) at (8,1) [draw,minimum width=1cm,minimum height=0.5cm] {BlockNTables};\ +-- >>> \ \\draw (oneOf2) -> (bb);\ +-- >>> \ \\draw (bb) -> (bt);\ +-- >>> \ \\draw (oneOf2) -> (sb);\ +-- >>> \ \\draw (sb) -> (st);\ +-- >>> \ \\draw (oneOf2) -> (bab);\ +-- >>> \ \\draw (bab) -> (bat);" +-- >>> :} +type DefaultHardForkTxOut xs = NS WrapTxOut xs + +class HasHardForkTxOut xs where + type HardForkTxOut xs :: Type + type HardForkTxOut xs = DefaultHardForkTxOut xs + + injectHardForkTxOut :: Index xs x -> Value (LedgerState x) -> HardForkTxOut xs + distribHardForkTxOut :: Index xs x -> HardForkTxOut xs -> Value (LedgerState x) + +injectHardForkTxOutDefault :: + Index xs x + -> Value (LedgerState x) + -> DefaultHardForkTxOut xs +injectHardForkTxOutDefault idx = injectNS idx . WrapTxOut + +distribHardForkTxOutDefault :: + CanHardFork xs + => Index xs x + -> DefaultHardForkTxOut xs + -> Value (LedgerState x) +distribHardForkTxOutDefault idx = + unwrapTxOut + . apFn (projectNP idx $ composeTxOutTranslations $ ipTranslateTxOut hardForkEraTranslation) + . K + +composeTxOutTranslations :: + SListI xs + => InPairs TranslateTxOut xs + -> NP (K (NS WrapTxOut xs) -.-> WrapTxOut) xs +composeTxOutTranslations = \case + PNil -> + fn (unZ . 
unK) :* Nil + PCons (TranslateTxOut t) ts -> + fn ( eitherNS + id + (error "composeTxOutTranslations: anachrony") + . unK + ) + :* hmap + (\innerf -> fn $ + apFn innerf + . K + . eitherNS + (Z . WrapTxOut . t . unwrapTxOut) + id + . unK) + (composeTxOutTranslations ts) + where + eitherNS :: (f x -> c) -> (NS f xs -> c) -> NS f (x ': xs) -> c + eitherNS l r = \case + Z x -> l x + S x -> r x + +class HasHardForkTxOut xs => SerializeHardForkTxOut xs where + encodeHardForkTxOut :: Proxy xs -> HardForkTxOut xs -> CBOR.Encoding + decodeHardForkTxOut :: Proxy xs -> CBOR.Decoder s (HardForkTxOut xs) + +encodeHardForkTxOutDefault :: + forall xs. All (Compose CanSerializeLedgerTables LedgerState) xs + => DefaultHardForkTxOut xs + -> CBOR.Encoding +encodeHardForkTxOutDefault = + hcollapse + . hcimap (Proxy @(Compose CanSerializeLedgerTables LedgerState)) each + where + each :: + forall x. CanSerializeLedgerTables (LedgerState x) + => Index xs x + -> WrapTxOut x + -> K CBOR.Encoding x + each idx (WrapTxOut txout) = K $ + CBOR.encodeListLen 2 + <> CBOR.encodeWord8 (toWord8 idx) + <> encodeValue (getLedgerTables $ codecLedgerTables @(LedgerState x)) txout + +decodeHardForkTxOutDefault :: + forall s xs. All (Compose CanSerializeLedgerTables LedgerState) xs + => CBOR.Decoder s (DefaultHardForkTxOut xs) +decodeHardForkTxOutDefault = do + CBOR.decodeListLenOf 2 + tag <- CBOR.decodeWord8 + aDecoder tag + where + each :: + forall x. CanSerializeLedgerTables (LedgerState x) + => Index xs x + -> forall s'. (K () -.-> K (CBOR.Decoder s' (NS WrapTxOut xs))) x + each idx = fn + (\(K ()) -> K $ injectNS idx . WrapTxOut <$> decodeValue (getLedgerTables $ codecLedgerTables @(LedgerState x))) + + aDecoder :: Word8 -> CBOR.Decoder s' (NS WrapTxOut xs) + aDecoder w = + hcollapse + $ flip hap (fromMaybe (error "Unknown tag") $ nsFromIndex w) + $ hcmap (Proxy @(Compose CanSerializeLedgerTables LedgerState)) + each + (indices @xs) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/CommonProtocolParams.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/CommonProtocolParams.hs index 0d47fd2e21..29811fa565 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/CommonProtocolParams.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/CommonProtocolParams.hs @@ -1,27 +1,36 @@ +{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE RankNTypes #-} +{-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} module Ouroboros.Consensus.HardFork.Combinator.Ledger.CommonProtocolParams () where import Data.SOP.BasicFunctors +import Data.SOP.Functors import Data.SOP.Strict import Ouroboros.Consensus.HardFork.Combinator.Abstract import Ouroboros.Consensus.HardFork.Combinator.Basics -import Ouroboros.Consensus.HardFork.Combinator.Ledger () +import Ouroboros.Consensus.HardFork.Combinator.Ledger + (HardForkHasLedgerTables, HasCanonicalTxIn, + HasHardForkTxOut) import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import Ouroboros.Consensus.Ledger.CommonProtocolParams -instance CanHardFork xs => CommonProtocolParams (HardForkBlock xs) where +instance ( CanHardFork xs + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => CommonProtocolParams (HardForkBlock xs) where maxHeaderSize = askCurrentLedger maxHeaderSize maxTxSize = askCurrentLedger maxTxSize askCurrentLedger :: CanHardFork xs - => (forall blk. 
CommonProtocolParams blk => LedgerState blk -> a) - -> LedgerState (HardForkBlock xs) -> a + => (forall blk. CommonProtocolParams blk => LedgerState blk mk -> a) + -> LedgerState (HardForkBlock xs) mk -> a askCurrentLedger f = hcollapse - . hcmap proxySingle (K . f) + . hcmap proxySingle (K . f . unFlip) . State.tip . hardForkLedgerStatePerEra diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/PeerSelection.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/PeerSelection.hs index d1e0d57f0a..e21c909bff 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/PeerSelection.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/PeerSelection.hs @@ -3,6 +3,7 @@ module Ouroboros.Consensus.HardFork.Combinator.Ledger.PeerSelection () where import Data.SOP.BasicFunctors +import Data.SOP.Functors import Data.SOP.Strict import Ouroboros.Consensus.HardFork.Combinator.Abstract import Ouroboros.Consensus.HardFork.Combinator.Basics @@ -13,6 +14,6 @@ import Ouroboros.Consensus.Ledger.SupportsPeerSelection instance CanHardFork xs => LedgerSupportsPeerSelection (HardForkBlock xs) where getPeers = hcollapse - . hcmap proxySingle (K . getPeers) + . hcmap proxySingle (K . getPeers . unFlip) . State.tip . hardForkLedgerStatePerEra diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/Query.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/Query.hs index 4bb3fd44e6..687afef0e2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/Query.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Ledger/Query.hs @@ -10,15 +10,18 @@ {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} +{-# LANGUAGE UndecidableSuperClasses #-} {-# OPTIONS_GHC -Wno-orphans #-} module Ouroboros.Consensus.HardFork.Combinator.Ledger.Query ( BlockQuery (..) + , BlockSupportsHFLedgerQuery (..) , HardForkQueryResult , QueryAnytime (..) , QueryHardFork (..) 
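The hunks below re-index 'BlockQuery' by a 'QueryFootprint': every query now declares whether it needs no ledger tables ('QFNoTables'), point lookups into them ('QFLookupTables'), or a traversal of a whole table ('QFTraverseTables'). As a rough sketch of what this indexing means for a single era — the block type @MyBlock@, its @MyTxIn@/@MyTxOut@ types and the three queries below are invented for illustration and are not part of this patch:

-- Hypothetical sketch only; none of these names exist in the patch.
data instance BlockQuery MyBlock footprint result where
  -- Answerable from the in-memory ledger state alone; needs no tables.
  GetTipSlot    :: BlockQuery MyBlock QFNoTables (WithOrigin SlotNo)
  -- Needs point lookups in the UTxO table, served through a read-only forker.
  GetUTxOByTxIn :: Set MyTxIn -> BlockQuery MyBlock QFLookupTables (Map MyTxIn MyTxOut)
  -- Needs to traverse the whole UTxO table.
  GetWholeUTxO  :: BlockQuery MyBlock QFTraverseTables (Map MyTxIn MyTxOut)

Queries with a non-trivial footprint are answered through a 'ReadOnlyForker' (see 'BlockSupportsHFLedgerQuery' below), the intention being that the UTxO set does not have to be materialised in memory to serve them.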
@@ -45,10 +48,13 @@ import Data.Reflection (give) import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Counting (getExactly) +import Data.SOP.Functors (Flip (..)) +import Data.SOP.Index import Data.SOP.Match (Mismatch (..), mustMatchNS) import Data.SOP.Strict import Data.Type.Equality import Data.Typeable (Typeable) +import NoThunks.Class import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Abstract (hardForkSummary) @@ -57,7 +63,8 @@ import Ouroboros.Consensus.HardFork.Combinator.AcrossEras import Ouroboros.Consensus.HardFork.Combinator.Basics import Ouroboros.Consensus.HardFork.Combinator.Block import Ouroboros.Consensus.HardFork.Combinator.Info -import Ouroboros.Consensus.HardFork.Combinator.Ledger () +import Ouroboros.Consensus.HardFork.Combinator.Ledger + (HardForkHasLedgerTables) import Ouroboros.Consensus.HardFork.Combinator.PartialConfig import Ouroboros.Consensus.HardFork.Combinator.State (Current (..), Past (..), Situated (..)) @@ -68,29 +75,23 @@ import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HardFork.History.EraParams (EraParamsFormat (..)) import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Node.Serialisation (Some (..)) -import Ouroboros.Consensus.TypeFamilyWrappers (WrapChainDepState (..)) +import Ouroboros.Consensus.Storage.LedgerDB +import Ouroboros.Consensus.TypeFamilyWrappers (WrapChainDepState (..), + WrapTxOut) import Ouroboros.Consensus.Util (ShowProxy) - -instance Typeable xs => ShowProxy (BlockQuery (HardForkBlock xs)) where - -instance All SingleEraBlock xs => ShowQuery (BlockQuery (HardForkBlock xs)) where - showResult (QueryAnytime qry _) result = showResult qry result - showResult (QueryHardFork qry) result = showResult qry result - showResult (QueryIfCurrent qry) mResult = - case mResult of - Left err -> show err - Right result -> showResult qry result +import Ouroboros.Consensus.Util.IOLike (MonadSTM (atomically)) type HardForkQueryResult xs = Either (MismatchEraInfo xs) -data instance BlockQuery (HardForkBlock xs) :: Type -> Type where +data instance BlockQuery (HardForkBlock xs) footprint result where -- | Answer a query about an era if it is the current one. QueryIfCurrent :: - QueryIfCurrent xs result - -> BlockQuery (HardForkBlock xs) (HardForkQueryResult xs result) + QueryIfCurrent xs footprint result + -> BlockQuery (HardForkBlock xs) footprint (HardForkQueryResult xs result) -- | Answer a query about an era from /any/ era. -- @@ -100,7 +101,7 @@ data instance BlockQuery (HardForkBlock xs) :: Type -> Type where IsNonEmpty xs => QueryAnytime result -> EraIndex (x ': xs) - -> BlockQuery (HardForkBlock (x ': xs)) result + -> BlockQuery (HardForkBlock (x ': xs)) QFNoTables result -- | Answer a query about the hard fork combinator -- @@ -109,10 +110,100 @@ data instance BlockQuery (HardForkBlock xs) :: Type -> Type where QueryHardFork :: IsNonEmpty xs => QueryHardFork (x ': xs) result - -> BlockQuery (HardForkBlock (x ': xs)) result + -> BlockQuery (HardForkBlock (x ': xs)) QFNoTables result + +-- | Queries that use ledger tables usually can be implemented faster if we work +-- with the hard fork tables rather than projecting everything to the +-- appropriate era before we process the query. 
This class should be used to +-- implement how these queries that have a footprint which is not @QFNoTables@ +-- are answered. +class ( All (Compose NoThunks WrapTxOut) xs + , All (Compose Show WrapTxOut) xs + , All (Compose Eq WrapTxOut) xs + , All (Compose HasTickedLedgerTables LedgerState) xs + , All (Compose HasLedgerTables LedgerState) xs + ) => BlockSupportsHFLedgerQuery xs where + answerBlockQueryHFLookup :: + All SingleEraBlock xs + => Monad m + => Index xs x + -> ExtLedgerCfg x + -> BlockQuery x QFLookupTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m result + + answerBlockQueryHFTraverse :: + All SingleEraBlock xs + => Monad m + => Index xs x + -> ExtLedgerCfg x + -> BlockQuery x QFTraverseTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m result + + -- | The @QFTraverseTables@ queries consist of some filter on the @TxOut@. This class + -- provides that filter so that @answerBlockQueryHFAll@ can be implemented + -- in an abstract manner depending on this function. + queryLedgerGetTraversingFilter :: + Index xs x + -> BlockQuery x QFTraverseTables result + -> Value (LedgerState (HardForkBlock xs)) + -> Bool + +{------------------------------------------------------------------------------- + Instances +-------------------------------------------------------------------------------} + +------ +-- Show +------ + +instance Typeable xs => ShowProxy (BlockQuery (HardForkBlock xs)) where +-- Use default implementation + +deriving instance All SingleEraBlock xs => Show (BlockQuery (HardForkBlock xs) footprint result) + +instance All SingleEraBlock xs + => ShowQuery (BlockQuery (HardForkBlock xs) footprint) where + showResult (QueryAnytime qry _) result = showResult qry result + showResult (QueryHardFork qry) result = showResult qry result + showResult (QueryIfCurrent qry) mResult = + case mResult of + Left err -> show err + Right result -> showResult qry result + +------ +-- Eq +------ + +instance All SingleEraBlock xs => SameDepIndex2 (BlockQuery (HardForkBlock xs)) where + sameDepIndex2 (QueryIfCurrent qry) (QueryIfCurrent qry') = + (\Refl -> Refl) <$> sameDepIndex2 qry qry' + sameDepIndex2 (QueryIfCurrent {}) _ = + Nothing + sameDepIndex2 (QueryAnytime qry era) (QueryAnytime qry' era') + | era == era' + = (\Refl -> Refl) <$> sameDepIndex qry qry' + | otherwise + = Nothing + sameDepIndex2(QueryAnytime {}) _ = + Nothing + sameDepIndex2 (QueryHardFork qry) (QueryHardFork qry') = + (\Refl -> Refl) <$> sameDepIndex qry qry' + sameDepIndex2 (QueryHardFork {}) _ = + Nothing + +{------------------------------------------------------------------------------- + Query Ledger +-------------------------------------------------------------------------------} -instance All SingleEraBlock xs => BlockSupportsLedgerQuery (HardForkBlock xs) where - answerBlockQuery +instance ( All SingleEraBlock xs + , HardForkHasLedgerTables xs + , BlockSupportsHFLedgerQuery xs + , CanHardFork xs + ) + => BlockSupportsLedgerQuery (HardForkBlock xs) where + answerPureBlockQuery (ExtLedgerCfg cfg) query ext@(ExtLedgerState st@(HardForkLedgerState hardForkState) _) = @@ -138,14 +229,46 @@ instance All SingleEraBlock xs => BlockSupportsLedgerQuery (HardForkBlock xs) wh lcfg = configLedger cfg ei = State.epochInfoLedger lcfg hardForkState + answerBlockQueryLookup + (ExtLedgerCfg cfg) + qry + forker = do + hardForkState <- hardForkLedgerStatePerEra . 
ledgerState <$> atomically (roforkerGetLedgerState forker) + let ei = State.epochInfoLedger lcfg hardForkState + cfgs = hmap ExtLedgerCfg $ distribTopLevelConfig ei cfg + case qry of + QueryIfCurrent queryIfCurrent -> + interpretQueryIfCurrentOne + cfgs + queryIfCurrent + forker + where + lcfg = configLedger cfg + + answerBlockQueryTraverse + (ExtLedgerCfg cfg) + qry + forker = do + hardForkState <- hardForkLedgerStatePerEra . ledgerState <$> atomically (roforkerGetLedgerState forker) + let ei = State.epochInfoLedger lcfg hardForkState + cfgs = hmap ExtLedgerCfg $ distribTopLevelConfig ei cfg + case qry of + QueryIfCurrent queryIfCurrent -> + interpretQueryIfCurrentAll + cfgs + queryIfCurrent + forker + where + lcfg = configLedger cfg + -- | Precondition: the 'ledgerState' and 'headerState' should be from the same -- era. In practice, this is _always_ the case, unless the 'ExtLedgerState' was -- manually crafted. distribExtLedgerState :: All SingleEraBlock xs - => ExtLedgerState (HardForkBlock xs) -> NS ExtLedgerState xs + => ExtLedgerState (HardForkBlock xs) mk -> NS (Flip ExtLedgerState mk) xs distribExtLedgerState (ExtLedgerState ledgerState headerState) = - hmap (\(Pair hst lst) -> ExtLedgerState lst hst) $ + hmap (\(Pair hst lst) -> Flip $ ExtLedgerState (unFlip lst) hst) $ mustMatchNS "HeaderState" (distribHeaderState headerState) @@ -166,29 +289,10 @@ distribHeaderState (HeaderState tip chainDepState) = (\(Pair t cds) -> HeaderState (NotOrigin t) (unwrapChainDepState cds)) (mustMatchNS "AnnTip" (distribAnnTip annTip) (State.tip chainDepState)) -instance All SingleEraBlock xs => SameDepIndex (BlockQuery (HardForkBlock xs)) where - sameDepIndex (QueryIfCurrent qry) (QueryIfCurrent qry') = - apply Refl <$> sameDepIndex qry qry' - sameDepIndex (QueryIfCurrent {}) _ = - Nothing - sameDepIndex (QueryAnytime qry era) (QueryAnytime qry' era') - | era == era' - = sameDepIndex qry qry' - | otherwise - = Nothing - sameDepIndex (QueryAnytime {}) _ = - Nothing - sameDepIndex (QueryHardFork qry) (QueryHardFork qry') = - sameDepIndex qry qry' - sameDepIndex (QueryHardFork {}) _ = - Nothing - -deriving instance All SingleEraBlock xs => Show (BlockQuery (HardForkBlock xs) result) - -getHardForkQuery :: BlockQuery (HardForkBlock xs) result +getHardForkQuery :: BlockQuery (HardForkBlock xs) footprint result -> (forall result'. result :~: HardForkQueryResult xs result' - -> QueryIfCurrent xs result' + -> QueryIfCurrent xs footprint result' -> r) -> (forall x' xs'. 
xs :~: x' ': xs' @@ -211,43 +315,90 @@ getHardForkQuery q k1 k2 k3 = case q of Current era queries -------------------------------------------------------------------------------} -data QueryIfCurrent :: [Type] -> Type -> Type where - QZ :: BlockQuery x result -> QueryIfCurrent (x ': xs) result - QS :: QueryIfCurrent xs result -> QueryIfCurrent (x ': xs) result +type QueryIfCurrent :: [Type] -> QueryFootprint -> Type -> Type +data QueryIfCurrent xs footprint result where + QZ :: BlockQuery x footprint result -> QueryIfCurrent (x ': xs) footprint result + QS :: QueryIfCurrent xs footprint result -> QueryIfCurrent (x ': xs) footprint result -deriving instance All SingleEraBlock xs => Show (QueryIfCurrent xs result) +deriving instance All SingleEraBlock xs => Show (QueryIfCurrent xs footprint result) -instance All SingleEraBlock xs => ShowQuery (QueryIfCurrent xs) where +instance All SingleEraBlock xs => ShowQuery (QueryIfCurrent xs footprint) where showResult (QZ qry) = showResult qry showResult (QS qry) = showResult qry -instance All SingleEraBlock xs => SameDepIndex (QueryIfCurrent xs) where - sameDepIndex (QZ qry) (QZ qry') = sameDepIndex qry qry' - sameDepIndex (QS qry) (QS qry') = sameDepIndex qry qry' - sameDepIndex _ _ = Nothing +instance All SingleEraBlock xs => SameDepIndex2 (QueryIfCurrent xs) where + sameDepIndex2 (QZ qry) (QZ qry') = sameDepIndex2 qry qry' + sameDepIndex2 (QS qry) (QS qry') = sameDepIndex2 qry qry' + sameDepIndex2 _ _ = Nothing interpretQueryIfCurrent :: forall result xs. All SingleEraBlock xs => NP ExtLedgerCfg xs - -> QueryIfCurrent xs result - -> NS ExtLedgerState xs + -> QueryIfCurrent xs QFNoTables result + -> NS (Flip ExtLedgerState EmptyMK) xs -> HardForkQueryResult xs result interpretQueryIfCurrent = go where go :: All SingleEraBlock xs' => NP ExtLedgerCfg xs' - -> QueryIfCurrent xs' result - -> NS ExtLedgerState xs' + -> QueryIfCurrent xs' QFNoTables result + -> NS (Flip ExtLedgerState EmptyMK) xs' -> HardForkQueryResult xs' result - go (c :* _) (QZ qry) (Z st) = - Right $ answerBlockQuery c qry st + go (c :* _) (QZ qry) (Z (Flip st)) = + Right $ answerPureBlockQuery c qry st go (_ :* cs) (QS qry) (S st) = first shiftMismatch $ go cs qry st go _ (QZ qry) (S st) = - Left $ MismatchEraInfo $ ML (queryInfo qry) (hcmap proxySingle ledgerInfo st) - go _ (QS qry) (Z st) = + Left $ MismatchEraInfo $ ML (queryInfo qry) (hcmap proxySingle (ledgerInfo . unFlip) st) + go _ (QS qry) (Z (Flip st)) = Left $ MismatchEraInfo $ MR (hardForkQueryInfo qry) (ledgerInfo st) +interpretQueryIfCurrentOne :: + forall result xs m. (MonadSTM m, BlockSupportsHFLedgerQuery xs, CanHardFork xs) + => NP ExtLedgerCfg xs + -> QueryIfCurrent xs QFLookupTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m (HardForkQueryResult xs result) +interpretQueryIfCurrentOne cfg q forker = do + st <- distribExtLedgerState <$> atomically (roforkerGetLedgerState forker) + go indices cfg q st + where + go :: All SingleEraBlock xs' + => NP (Index xs) xs' + -> NP ExtLedgerCfg xs' + -> QueryIfCurrent xs' QFLookupTables result + -> NS (Flip ExtLedgerState EmptyMK) xs' + -> m (HardForkQueryResult xs' result) + go (idx :* _) (c :* _) (QZ qry) _ = + Right <$> answerBlockQueryHFLookup idx c qry forker + go (_ :* idx) (_ :* cs) (QS qry) (S st) = + first shiftMismatch <$> go idx cs qry st + go _ _ (QS qry) (Z (Flip st)) = + pure $ Left $ MismatchEraInfo $ MR (hardForkQueryInfo qry) (ledgerInfo st) + +interpretQueryIfCurrentAll :: + forall result xs m. 
(MonadSTM m, BlockSupportsHFLedgerQuery xs, CanHardFork xs) + => NP ExtLedgerCfg xs + -> QueryIfCurrent xs QFTraverseTables result + -> ReadOnlyForker' m (HardForkBlock xs) + -> m (HardForkQueryResult xs result) +interpretQueryIfCurrentAll cfg q forker = do + st <- distribExtLedgerState <$> atomically (roforkerGetLedgerState forker) + go indices cfg q st + where + go :: All SingleEraBlock xs' + => NP (Index xs) xs' + -> NP ExtLedgerCfg xs' + -> QueryIfCurrent xs' QFTraverseTables result + -> NS (Flip ExtLedgerState EmptyMK) xs' + -> m (HardForkQueryResult xs' result) + go (idx :* _) (c :* _) (QZ qry) _ = + Right <$> answerBlockQueryHFTraverse idx c qry forker + go (_ :* idx) (_ :* cs) (QS qry) (S st) = + first shiftMismatch <$> go idx cs qry st + go _ _ (QS qry) (Z (Flip st)) = + pure $ Left $ MismatchEraInfo $ MR (hardForkQueryInfo qry) (ledgerInfo st) + {------------------------------------------------------------------------------- Any era queries -------------------------------------------------------------------------------} @@ -261,14 +412,14 @@ instance ShowQuery QueryAnytime where showResult GetEraStart = show instance SameDepIndex QueryAnytime where - sameDepIndex GetEraStart GetEraStart = Just Refl + sameDepIndex GetEraStart GetEraStart = Just Refl interpretQueryAnytime :: - forall result xs. All SingleEraBlock xs + forall result xs mk. All SingleEraBlock xs => HardForkLedgerConfig xs -> QueryAnytime result -> EraIndex xs - -> State.HardForkState LedgerState xs + -> State.HardForkState (Flip LedgerState mk) xs -> result interpretQueryAnytime cfg query (EraIndex era) st = answerQueryAnytime cfg query (State.situate era st) @@ -277,7 +428,7 @@ answerQueryAnytime :: All SingleEraBlock xs => HardForkLedgerConfig xs -> QueryAnytime result - -> Situated h LedgerState xs + -> Situated h (Flip LedgerState mk) xs -> result answerQueryAnytime HardForkLedgerConfig{..} = go cfgs (getExactly (getShape hardForkLedgerConfigShape)) @@ -288,7 +439,7 @@ answerQueryAnytime HardForkLedgerConfig{..} = => NP WrapPartialLedgerConfig xs' -> NP (K EraParams) xs' -> QueryAnytime result - -> Situated h LedgerState xs' + -> Situated h (Flip LedgerState mk) xs' -> result go Nil _ _ ctxt = case ctxt of {} go (c :* cs) (K ps :* pss) GetEraStart ctxt = case ctxt of @@ -302,7 +453,7 @@ answerQueryAnytime HardForkLedgerConfig{..} = (unwrapPartialLedgerConfig c) ps (currentStart cur) - (currentState cur) + (unFlip $ currentState cur) {------------------------------------------------------------------------------- Hard fork queries @@ -332,7 +483,7 @@ interpretQueryHardFork :: All SingleEraBlock xs => HardForkLedgerConfig xs -> QueryHardFork xs result - -> LedgerState (HardForkBlock xs) + -> LedgerState (HardForkBlock xs) mk -> result interpretQueryHardFork cfg query st = case query of @@ -384,21 +535,21 @@ decodeQueryHardForkResult epf = \case Auxiliary -------------------------------------------------------------------------------} -ledgerInfo :: forall blk. SingleEraBlock blk - => ExtLedgerState blk +ledgerInfo :: forall blk mk. SingleEraBlock blk + => ExtLedgerState blk mk -> LedgerEraInfo blk ledgerInfo _ = LedgerEraInfo $ singleEraInfo (Proxy @blk) -queryInfo :: forall blk query result. SingleEraBlock blk - => query blk result -> SingleEraInfo blk +queryInfo :: forall blk query (footprint :: QueryFootprint) result. 
SingleEraBlock blk + => query blk footprint result -> SingleEraInfo blk queryInfo _ = singleEraInfo (Proxy @blk) hardForkQueryInfo :: All SingleEraBlock xs - => QueryIfCurrent xs result -> NS SingleEraInfo xs + => QueryIfCurrent xs footprint result -> NS SingleEraInfo xs hardForkQueryInfo = go where go :: All SingleEraBlock xs' - => QueryIfCurrent xs' result -> NS SingleEraInfo xs' + => QueryIfCurrent xs' footprint result -> NS SingleEraInfo xs' go (QZ qry) = Z (queryInfo qry) go (QS qry) = S (go qry) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Mempool.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Mempool.hs index a987cb7c32..4e27c9229e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Mempool.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Mempool.hs @@ -32,7 +32,7 @@ import Data.Kind (Type) import qualified Data.Measure as Measure import Data.SOP.BasicFunctors import Data.SOP.Constraint -import Data.SOP.Functors (Product2 (..)) +import Data.SOP.Functors import Data.SOP.Index import Data.SOP.InPairs (InPairs) import qualified Data.SOP.InPairs as InPairs @@ -47,14 +47,13 @@ import Ouroboros.Consensus.HardFork.Combinator.AcrossEras import Ouroboros.Consensus.HardFork.Combinator.Basics import Ouroboros.Consensus.HardFork.Combinator.Info import Ouroboros.Consensus.HardFork.Combinator.InjectTxs -import Ouroboros.Consensus.HardFork.Combinator.Ledger (Ticked (..)) +import Ouroboros.Consensus.HardFork.Combinator.Ledger import Ouroboros.Consensus.HardFork.Combinator.PartialConfig - (WrapPartialLedgerConfig (..)) import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.TypeFamilyWrappers -import Ouroboros.Consensus.Util (ShowProxy) +import Ouroboros.Consensus.Util data HardForkApplyTxErr xs = -- | Validation error from one of the eras @@ -96,12 +95,33 @@ instance Typeable xs => ShowProxy (GenTx (HardForkBlock xs)) where type instance ApplyTxErr (HardForkBlock xs) = HardForkApplyTxErr xs -instance CanHardFork xs => LedgerSupportsMempool (HardForkBlock xs) where - applyTx = applyHelper ModeApply - - reapplyTx = \cfg slot vtx tls -> - fmap (\(tls', _vtx) -> tls') - $ applyHelper +-- | Just to discharge cognitive load, this is equivalent to: +-- +-- > ([invalidTxs, ...], [validTxs, ...], st) +-- +-- Where @invalidTxs@ and @validTxs@ are hard-fork transactions, and only @st@ +-- depends on a particular @blk@. +-- +-- We do not define this as a new data type to reuse the @Applicative@ and +-- friends instances of these type constructors, which are useful to +-- @hsequence'@ a @HardForkState@ of this. 
+type ComposedReapplyTxsResult xs = + (,,) + [Invalidated (HardForkBlock xs)] + [Validated (GenTx (HardForkBlock xs))] + :.: + FlipTickedLedgerState DiffMK + +instance ( CanHardFork xs + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + ) => LedgerSupportsMempool (HardForkBlock xs) where + applyTx = applyHelper ModeApply + + reapplyTx cfg slot vtx tls = + fst + <$> applyHelper ModeReapply cfg DoNotIntervene @@ -109,6 +129,61 @@ instance CanHardFork xs => LedgerSupportsMempool (HardForkBlock xs) where (WrapValidatedGenTx vtx) tls + reapplyTxs + HardForkLedgerConfig{..} + slot + vtxs + (TickedHardForkLedgerState transition hardForkState) = + (\(err, val, st') -> + ReapplyTxsResult (mismatched' ++ err) val (TickedHardForkLedgerState transition st')) + . hsequence' + $ hcizipWith proxySingle modeApplyCurrent cfgs matched + + where + pcfgs = getPerEraLedgerConfig hardForkLedgerConfigPerEra + cfgs = hcmap proxySingle (completeLedgerConfig'' ei) pcfgs + ei = State.epochInfoPrecomputedTransitionInfo + hardForkLedgerConfigShape + transition + hardForkState + + -- Transactions are unwrapped into the particular era transactions. + (mismatched, matched) = + matchPolyTxs + -- How to translate txs to later eras + (InPairs.hmap snd2 (InPairs.requiringBoth cfgs hardForkInjectTxs)) + (map (getOneEraValidatedGenTx . getHardForkValidatedGenTx) vtxs) + hardForkState + + mismatched' :: [Invalidated (HardForkBlock xs)] + mismatched' = + map (\x -> flip Invalidated ( HardForkApplyTxErrWrongEra + $ MismatchEraInfo + $ Match.bihcmap proxySingle singleEraInfo ledgerInfo + $ snd x) + . HardForkValidatedGenTx + . OneEraValidatedGenTx + . fst + $ x) + mismatched + + modeApplyCurrent :: forall blk. + SingleEraBlock blk + => Index xs blk + -> WrapLedgerConfig blk + -> Product + (ListOfTxs WrapValidatedGenTx xs) + (FlipTickedLedgerState ValuesMK) blk + -> ComposedReapplyTxsResult xs blk + modeApplyCurrent index cfg (Pair txs (FlipTickedLedgerState st)) = + let ReapplyTxsResult err val st' = + reapplyTxs (unwrapLedgerConfig cfg) slot [ unwrapValidatedGenTx t | TxsWithOriginal _ t <- txsList txs ] st + in Comp + ( map (\x -> flip Invalidated (injectApplyTxErr index $ getReason x) . injectValidatedGenTx index . getInvalidated $ x) err + , map (HardForkValidatedGenTx . OneEraValidatedGenTx . injectNS index . WrapValidatedGenTx) val + , FlipTickedLedgerState st' + ) + txForgetValidated = HardForkGenTx . OneEraGenTx @@ -116,6 +191,17 @@ instance CanHardFork xs => LedgerSupportsMempool (HardForkBlock xs) where . getOneEraValidatedGenTx . getHardForkValidatedGenTx + getTransactionKeySets (HardForkGenTx (OneEraGenTx ns)) = + hcollapse + $ hcimap proxySingle f ns + where + f :: + SingleEraBlock x + => Index xs x + -> GenTx x + -> K (LedgerTables (LedgerState (HardForkBlock xs)) KeysMK) x + f idx tx = K $ injectLedgerTables idx $ getTransactionKeySets tx + instance CanHardFork xs => TxLimits (HardForkBlock xs) where type TxMeasure (HardForkBlock xs) = HardForkTxMeasure xs @@ -136,14 +222,14 @@ instance CanHardFork xs => TxLimits (HardForkBlock xs) where SingleEraBlock blk => Index xs blk -> WrapPartialLedgerConfig blk - -> (Ticked :.: LedgerState) blk + -> FlipTickedLedgerState mk blk -> K (HardForkTxMeasure xs) blk aux idx pcfg st' = K $ hardForkInjTxMeasure . injectNS idx . 
WrapTxMeasure $ blockCapacityTxMeasure (completeLedgerConfig' ei pcfg) - (unComp st') + (getFlipTickedLedgerState st') txMeasure HardForkLedgerConfig{..} @@ -172,7 +258,7 @@ instance CanHardFork xs => TxLimits (HardForkBlock xs) where SingleEraBlock blk => Index xs blk -> WrapLedgerConfig blk - -> (Product GenTx (Ticked :.: LedgerState)) blk + -> (Product GenTx (FlipTickedLedgerState ValuesMK)) blk -> K (Except (HardForkApplyTxErr xs) (HardForkTxMeasure xs)) blk aux idx cfg (Pair tx' st') = K @@ -187,7 +273,7 @@ instance CanHardFork xs => TxLimits (HardForkBlock xs) where ) $ txMeasure (unwrapLedgerConfig cfg) - (unComp st') + (getFlipTickedLedgerState st') tx' -- | A private type used only to clarify the parameterization of 'applyHelper' @@ -195,9 +281,14 @@ data ApplyHelperMode :: (Type -> Type) -> Type where ModeApply :: ApplyHelperMode GenTx ModeReapply :: ApplyHelperMode WrapValidatedGenTx +-- | 'applyHelper' has to return one of these, depending on the apply mode used. +type family ApplyMK k where + ApplyMK (ApplyHelperMode GenTx) = DiffMK + ApplyMK (ApplyHelperMode WrapValidatedGenTx) = ValuesMK + -- | A private type used only to clarify the definition of 'applyHelper' -data ApplyResult xs blk = ApplyResult { - arState :: Ticked (LedgerState blk) +data ApplyResult xs txIn blk = ApplyResult { + arState :: Ticked1 (LedgerState blk) (ApplyMK (ApplyHelperMode txIn)) , arValidatedTx :: Validated (GenTx (HardForkBlock xs)) } @@ -211,10 +302,10 @@ applyHelper :: forall xs txIn. CanHardFork xs -> WhetherToIntervene -> SlotNo -> txIn (HardForkBlock xs) - -> TickedLedgerState (HardForkBlock xs) + -> TickedLedgerState (HardForkBlock xs) ValuesMK -> Except (HardForkApplyTxErr xs) - ( TickedLedgerState (HardForkBlock xs) + ( TickedLedgerState (HardForkBlock xs) (ApplyMK (ApplyHelperMode txIn)) , Validated (GenTx (HardForkBlock xs)) ) applyHelper mode @@ -248,10 +339,10 @@ applyHelper mode result <- hsequence' $ hcizipWith proxySingle modeApplyCurrent cfgs matched - let _ = result :: State.HardForkState (ApplyResult xs) xs + let _ = result :: State.HardForkState (ApplyResult xs txIn) xs - st' :: State.HardForkState (Ticked :.: LedgerState) xs - st' = (Comp . arState) `hmap` result + st' :: State.HardForkState (FlipTickedLedgerState (ApplyMK (ApplyHelperMode txIn))) xs + st' = (FlipTickedLedgerState . arState) `hmap` result vtx :: Validated (GenTx (HardForkBlock xs)) vtx = hcollapse $ (K . arValidatedTx) `hmap` result @@ -289,29 +380,33 @@ applyHelper mode ModeReapply -> injValidatedTx modeApplyCurrent :: forall blk. 
- SingleEraBlock blk - => Index xs blk - -> WrapLedgerConfig blk - -> Product txIn (Ticked :.: LedgerState) blk + SingleEraBlock blk + => Index xs blk + -> WrapLedgerConfig blk + -> Product txIn (FlipTickedLedgerState ValuesMK) blk -> ( Except (HardForkApplyTxErr xs) - :.: ApplyResult xs - ) blk - modeApplyCurrent index cfg (Pair tx' (Comp st)) = + :.: ApplyResult xs txIn + ) blk + modeApplyCurrent index cfg (Pair tx' (FlipTickedLedgerState st)) = Comp $ withExcept (injectApplyTxErr index) $ do let lcfg = unwrapLedgerConfig cfg - (st', vtx) <- case mode of - ModeApply -> applyTx lcfg wti slot tx' st + case mode of + ModeApply -> do + (st', vtx) <- applyTx lcfg wti slot tx' st + pure ApplyResult { + arValidatedTx = injectValidatedGenTx index vtx + , arState = st' + } ModeReapply -> do let vtx' = unwrapValidatedGenTx tx' st' <- reapplyTx lcfg slot vtx' st -- provide the given transaction, which was already validated - pure (st', vtx') - pure ApplyResult { - arValidatedTx = injectValidatedGenTx index vtx - , arState = st' - } + pure ApplyResult { + arValidatedTx = injectValidatedGenTx index vtx' + , arState = st' + } newtype instance TxId (GenTx (HardForkBlock xs)) = HardForkGenTxId { getHardForkGenTxId :: OneEraGenTxId xs @@ -350,8 +445,8 @@ instance All HasTxs xs => HasTxs (HardForkBlock xs) where Auxiliary -------------------------------------------------------------------------------} -ledgerInfo :: forall blk. SingleEraBlock blk - => State.Current (Ticked :.: LedgerState) blk -> LedgerEraInfo blk +ledgerInfo :: forall blk mk. SingleEraBlock blk + => State.Current (FlipTickedLedgerState mk) blk -> LedgerEraInfo blk ledgerInfo _ = LedgerEraInfo $ singleEraInfo (Proxy @blk) injectApplyTxErr :: Index xs blk -> ApplyTxErr blk -> HardForkApplyTxErr xs diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node.hs index be360aac4a..4795b673d3 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node.hs @@ -17,8 +17,12 @@ import Ouroboros.Consensus.HardFork.Combinator.Abstract import Ouroboros.Consensus.HardFork.Combinator.AcrossEras import Ouroboros.Consensus.HardFork.Combinator.Basics import Ouroboros.Consensus.HardFork.Combinator.Forging () +import Ouroboros.Consensus.HardFork.Combinator.Ledger + (HardForkHasLedgerTables, HasCanonicalTxIn, + HasHardForkTxOut) import Ouroboros.Consensus.HardFork.Combinator.Ledger.CommonProtocolParams () import Ouroboros.Consensus.HardFork.Combinator.Ledger.PeerSelection () +import Ouroboros.Consensus.HardFork.Combinator.Ledger.Query import Ouroboros.Consensus.HardFork.Combinator.Node.DiffusionPipelining () import Ouroboros.Consensus.HardFork.Combinator.Node.InitStorage () import Ouroboros.Consensus.HardFork.Combinator.Node.Metrics () @@ -58,7 +62,10 @@ getSameConfigValue getValue blockConfig = getSameValue values -------------------------------------------------------------------------------} instance ( CanHardFork xs - -- Instances that must be defined for specific values of @b@: + , HardForkHasLedgerTables xs + , HasCanonicalTxIn xs + , HasHardForkTxOut xs + , BlockSupportsHFLedgerQuery xs , SupportedNetworkProtocolVersion (HardForkBlock xs) , SerialiseHFC xs ) => RunNode (HardForkBlock xs) diff --git 
a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node/InitStorage.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node/InitStorage.hs index 7051cfe684..a01e3bef0a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node/InitStorage.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Node/InitStorage.hs @@ -8,12 +8,14 @@ module Ouroboros.Consensus.HardFork.Combinator.Node.InitStorage () where import Data.Proxy import Data.SOP.BasicFunctors +import Data.SOP.Functors import Data.SOP.Index import Data.SOP.Strict import Ouroboros.Consensus.HardFork.Combinator.Abstract import Ouroboros.Consensus.HardFork.Combinator.AcrossEras import Ouroboros.Consensus.HardFork.Combinator.Basics import qualified Ouroboros.Consensus.HardFork.Combinator.State as State +import Ouroboros.Consensus.Ledger.Abstract (EmptyMK) import Ouroboros.Consensus.Node.InitStorage import Ouroboros.Consensus.Storage.ChainDB.Init (InitChainDB (..)) @@ -60,9 +62,9 @@ instance CanHardFork xs => NodeInitStorage (HardForkBlock xs) where SingleEraBlock blk => Index xs blk -> StorageConfig blk - -> LedgerState blk + -> Flip LedgerState EmptyMK blk -> K (m ()) blk - aux index cfg' currentLedger = K $ + aux index cfg' (Flip currentLedger) = K $ nodeInitChainDB cfg' InitChainDB { addBlock = addBlock initChainDB . injectNS' (Proxy @I) index diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/Common.hs index bf36dda805..57a493abe2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/Common.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/Common.hs @@ -92,9 +92,10 @@ import Ouroboros.Consensus.HardFork.Combinator.Info import Ouroboros.Consensus.HardFork.Combinator.Ledger.Query import Ouroboros.Consensus.HardFork.Combinator.State import Ouroboros.Consensus.HardFork.Combinator.State.Instances +import Ouroboros.Consensus.Ledger.Query +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run -import Ouroboros.Consensus.Node.Serialisation (Some (..)) import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.TypeFamilyWrappers import Ouroboros.Network.Block (Serialised) @@ -261,6 +262,9 @@ class ( CanHardFork xs , All (DecodeDiskDepIx (NestedCtxt Header)) xs -- Required for 'getHfcBinaryBlockInfo' , All HasBinaryBlockInfo xs + -- LedgerTables on the HardForkBlock might not be compositionally + -- defined, but we need to require these instances for any instantiation. + , CanSerializeLedgerTables (LedgerState (HardForkBlock xs)) ) => SerialiseHFC xs where encodeDiskHfcBlock :: CodecConfig (HardForkBlock xs) @@ -274,14 +278,14 @@ class ( CanHardFork xs decodeDiskHfcBlock :: CodecConfig (HardForkBlock xs) -> forall s. Decoder s (Lazy.ByteString -> HardForkBlock xs) decodeDiskHfcBlock cfg = - fmap (\f -> HardForkBlock . OneEraBlock . 
f) + <$> decodeAnnNS (hcmap pSHFC aux cfgs) where cfgs = getPerEraCodecConfig (hardForkCodecConfigPerEra cfg) aux :: SerialiseDiskConstraints blk => CodecConfig blk -> AnnDecoder I blk - aux cfg' = AnnDecoder $ (\f -> I . f) <$> decodeDisk cfg' + aux cfg' = AnnDecoder $ (I .) <$> decodeDisk cfg' -- | Used as the implementation of 'reconstructPrefixLen' for -- 'HardForkBlock'. @@ -412,7 +416,7 @@ encodeTelescope :: SListI xs => NP (f -.-> K Encoding) xs -> HardForkState f xs -> Encoding encodeTelescope es (HardForkState st) = mconcat [ Enc.encodeListLen (1 + fromIntegral ix) - , mconcat $ hcollapse $ SimpleTelescope $ + , mconcat $ hcollapse $ SimpleTelescope (Telescope.bihzipWith (const encPast) encCurrent es st) ] where @@ -643,26 +647,26 @@ undistribSerialisedHeader = depPairFirst (mapNestedCtxt NCS) $ go bs distribQueryIfCurrent :: - Some (QueryIfCurrent xs) - -> NS (SomeSecond BlockQuery) xs -distribQueryIfCurrent = \(Some qry) -> go qry + SomeBlockQuery (QueryIfCurrent xs) + -> NS (SomeBlockQuery :.: BlockQuery) xs +distribQueryIfCurrent = go where - go :: QueryIfCurrent xs result -> NS (SomeSecond BlockQuery) xs - go (QZ qry) = Z (SomeSecond qry) - go (QS qry) = S (go qry) + go :: SomeBlockQuery (QueryIfCurrent xs) -> NS (SomeBlockQuery :.: BlockQuery) xs + go (SomeBlockQuery (QZ qry)) = Z (Comp (SomeBlockQuery qry)) + go (SomeBlockQuery (QS qry)) = S (go (SomeBlockQuery qry)) undistribQueryIfCurrent :: - NS (SomeSecond BlockQuery) xs - -> Some (QueryIfCurrent xs) + NS (SomeBlockQuery :.: BlockQuery) xs + -> SomeBlockQuery (QueryIfCurrent xs) undistribQueryIfCurrent = go where - go :: NS (SomeSecond BlockQuery) xs -> Some (QueryIfCurrent xs) + go :: NS (SomeBlockQuery :.: BlockQuery) xs -> SomeBlockQuery (QueryIfCurrent xs) go (Z qry) = case qry of - SomeSecond qry' -> - Some (QZ qry') + Comp (SomeBlockQuery qry') -> + SomeBlockQuery (QZ qry') go (S qry) = case go qry of - Some qry' -> - Some (QS qry') + SomeBlockQuery qry' -> + SomeBlockQuery (QS qry') {------------------------------------------------------------------------------- Deriving-via support @@ -686,6 +690,6 @@ instance All (Compose Serialise f) xs => Serialise (SerialiseNS f xs) where (fn (K . Serialise.encode))) . 
getSerialiseNS - decode = fmap SerialiseNS - $ decodeNS (hcpure (Proxy @(Compose Serialise f)) + decode = SerialiseNS + <$> decodeNS (hcpure (Proxy @(Compose Serialise f)) (Comp Serialise.decode)) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseDisk.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseDisk.hs index 826074ae14..69e350b434 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseDisk.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseDisk.hs @@ -11,6 +11,7 @@ import qualified Data.ByteString.Lazy as Lazy import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Dict (Dict (..), all_NP) +import Data.SOP.Functors import Data.SOP.Strict import Ouroboros.Consensus.Block import Ouroboros.Consensus.HardFork.Combinator.AcrossEras @@ -18,10 +19,10 @@ import Ouroboros.Consensus.HardFork.Combinator.Basics import Ouroboros.Consensus.HardFork.Combinator.Protocol import Ouroboros.Consensus.HardFork.Combinator.Serialisation.Common import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Storage.ChainDB import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.TypeFamilyWrappers -import Ouroboros.Consensus.Util ((.:)) instance SerialiseHFC xs => SerialiseDiskConstraints (HardForkBlock xs) @@ -117,17 +118,17 @@ instance SerialiseHFC xs cfgs = getPerEraCodecConfig (hardForkCodecConfigPerEra cfg) instance SerialiseHFC xs - => EncodeDisk (HardForkBlock xs) (LedgerState (HardForkBlock xs) )where + => EncodeDisk (HardForkBlock xs) (LedgerState (HardForkBlock xs) EmptyMK) where encodeDisk cfg = - encodeTelescope (hcmap pSHFC (fn . (K .: encodeDisk)) cfgs) + encodeTelescope (hcmap pSHFC (\cfg' -> fn (K . encodeDisk cfg' . unFlip)) cfgs) . hardForkLedgerStatePerEra where cfgs = getPerEraCodecConfig (hardForkCodecConfigPerEra cfg) instance SerialiseHFC xs - => DecodeDisk (HardForkBlock xs) (LedgerState (HardForkBlock xs)) where + => DecodeDisk (HardForkBlock xs) (LedgerState (HardForkBlock xs) EmptyMK) where decodeDisk cfg = fmap HardForkLedgerState - $ decodeTelescope (hcmap pSHFC (Comp . decodeDisk) cfgs) + $ decodeTelescope (hcmap pSHFC (Comp . fmap Flip . 
decodeDisk) cfgs) where cfgs = getPerEraCodecConfig (hardForkCodecConfigPerEra cfg) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseNodeToClient.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseNodeToClient.hs index 7eb237f82a..1c0ac66140 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseNodeToClient.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Serialisation/SerialiseNodeToClient.hs @@ -37,7 +37,8 @@ import Ouroboros.Consensus.HardFork.Combinator.Ledger.Query import Ouroboros.Consensus.HardFork.Combinator.Mempool import Ouroboros.Consensus.HardFork.Combinator.Serialisation.Common import Ouroboros.Consensus.HardFork.Combinator.Serialisation.SerialiseDisk () -import Ouroboros.Consensus.HardFork.History (EraParamsFormat (..)) +import Ouroboros.Consensus.HardFork.History +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool (GenTxId) import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run @@ -208,13 +209,13 @@ decodeQueryHardFork = do _ -> fail $ "QueryHardFork: invalid tag " ++ show tag instance SerialiseHFC xs - => SerialiseNodeToClient (HardForkBlock xs) (SomeSecond BlockQuery (HardForkBlock xs)) where - encodeNodeToClient ccfg version (SomeSecond q) = case version of + => SerialiseNodeToClient (HardForkBlock xs) (SomeBlockQuery (BlockQuery (HardForkBlock xs))) where + encodeNodeToClient ccfg version (SomeBlockQuery q) = case version of HardForkNodeToClientDisabled v0 -> case q of QueryIfCurrent qry -> - case distribQueryIfCurrent (Some qry) of - Z qry0 -> encodeNodeToClient (hd ccfgs) v0 qry0 - S later -> throw $ futureEraException (notFirstEra later) + case distribQueryIfCurrent (SomeBlockQuery qry) of + Z (Comp qry0) -> encodeNodeToClient (hd ccfgs) v0 qry0 + S later -> throw $ futureEraException (notFirstEra later) QueryAnytime {} -> throw HardForkEncoderQueryHfcDisabled QueryHardFork {} -> @@ -224,7 +225,7 @@ instance SerialiseHFC xs QueryIfCurrent qry -> mconcat [ Enc.encodeListLen 2 , Enc.encodeWord8 0 - , dispatchEncoder ccfg version (distribQueryIfCurrent (Some qry)) + , dispatchEncoder ccfg version (distribQueryIfCurrent (SomeBlockQuery qry)) ] QueryAnytime qry eraIndex -> mconcat [ Enc.encodeListLen 3 @@ -242,7 +243,7 @@ instance SerialiseHFC xs decodeNodeToClient ccfg version = case version of HardForkNodeToClientDisabled v0 -> - injQueryIfCurrent . Z <$> + injQueryIfCurrent . Z . 
Comp <$> decodeNodeToClient (hd ccfgs) v0 HardForkNodeToClientEnabled {} -> case isNonEmpty (Proxy @xs) of ProofNonEmpty (_ :: Proxy x') (p :: Proxy xs') -> do @@ -255,40 +256,40 @@ instance SerialiseHFC xs Some (qry :: QueryAnytime result) <- Serialise.decode eraIndex :: EraIndex (x' ': xs') <- Serialise.decode case checkIsNonEmpty p of - Nothing -> fail $ "QueryAnytime requires multiple era" + Nothing -> fail "QueryAnytime requires multiple era" Just (ProofNonEmpty {}) -> - return $ SomeSecond (QueryAnytime qry eraIndex) + return $ SomeBlockQuery (QueryAnytime qry eraIndex) (2, 2) -> do Some (qry :: QueryHardFork xs result) <- decodeQueryHardFork case checkIsNonEmpty p of - Nothing -> fail $ "QueryHardFork requires multiple era" + Nothing -> fail "QueryHardFork requires multiple era" Just (ProofNonEmpty {}) -> - return $ SomeSecond (QueryHardFork qry) + return $ SomeBlockQuery (QueryHardFork qry) _ -> fail $ "HardForkQuery: invalid size and tag" <> show (size, tag) where ccfgs = getPerEraCodecConfig $ hardForkCodecConfigPerEra ccfg - injQueryIfCurrent :: NS (SomeSecond BlockQuery) xs - -> SomeSecond BlockQuery (HardForkBlock xs) + injQueryIfCurrent :: NS (SomeBlockQuery :.: BlockQuery) xs + -> SomeBlockQuery (BlockQuery (HardForkBlock xs)) injQueryIfCurrent ns = case undistribQueryIfCurrent ns of - Some q -> SomeSecond (QueryIfCurrent q) + SomeBlockQuery q -> SomeBlockQuery (QueryIfCurrent q) {------------------------------------------------------------------------------- Results -------------------------------------------------------------------------------} instance SerialiseHFC xs - => SerialiseResult (HardForkBlock xs) (BlockQuery (HardForkBlock xs)) where - encodeResult ccfg version (QueryIfCurrent qry) = + => SerialiseResult' (HardForkBlock xs) BlockQuery where + encodeResult' ccfg version (QueryIfCurrent qry) = case isNonEmpty (Proxy @xs) of ProofNonEmpty {} -> encodeEitherMismatch version $ case (ccfgs, version, qry) of (c0 :* _, HardForkNodeToClientDisabled v0, QZ qry') -> - encodeResult c0 v0 qry' + encodeResult' c0 v0 qry' (_, HardForkNodeToClientDisabled _, QS qry') -> throw $ futureEraException (hardForkQueryInfo qry') (_, HardForkNodeToClientEnabled _ versions, _) -> @@ -296,18 +297,18 @@ instance SerialiseHFC xs where ccfgs = getPerEraCodecConfig $ hardForkCodecConfigPerEra ccfg - encodeResult _ _ (QueryAnytime qry _) = encodeQueryAnytimeResult qry - encodeResult _ version (QueryHardFork qry) = encodeQueryHardForkResult epf qry + encodeResult' _ _ (QueryAnytime qry _) = encodeQueryAnytimeResult qry + encodeResult' _ version (QueryHardFork qry) = encodeQueryHardForkResult epf qry where epf = eraParamsFormatFromVersion version - decodeResult ccfg version (QueryIfCurrent qry) = + decodeResult' ccfg version (QueryIfCurrent qry) = case isNonEmpty (Proxy @xs) of ProofNonEmpty {} -> decodeEitherMismatch version $ case (ccfgs, version, qry) of (c0 :* _, HardForkNodeToClientDisabled v0, QZ qry') -> - decodeResult c0 v0 qry' + decodeResult' c0 v0 qry' (_, HardForkNodeToClientDisabled _, QS qry') -> throw $ futureEraException (hardForkQueryInfo qry') (_, HardForkNodeToClientEnabled _ versions, _) -> @@ -315,8 +316,8 @@ instance SerialiseHFC xs where ccfgs = getPerEraCodecConfig $ hardForkCodecConfigPerEra ccfg - decodeResult _ _ (QueryAnytime qry _) = decodeQueryAnytimeResult qry - decodeResult _ version (QueryHardFork qry) = decodeQueryHardForkResult epf qry + decodeResult' _ _ (QueryAnytime qry _) = decodeQueryAnytimeResult qry + decodeResult' _ version (QueryHardFork qry) = 
decodeQueryHardForkResult epf qry where epf = eraParamsFormatFromVersion version @@ -331,15 +332,15 @@ encodeQueryIfCurrentResult :: All SerialiseConstraintsHFC xs => NP CodecConfig xs -> NP EraNodeToClientVersion xs - -> QueryIfCurrent xs result + -> QueryIfCurrent xs fp result -> result -> Encoding encodeQueryIfCurrentResult (c :* _) (EraNodeToClientEnabled v :* _) (QZ qry) = - encodeResult c v qry + encodeResult' c v qry encodeQueryIfCurrentResult (_ :* _) (EraNodeToClientDisabled :* _) (QZ qry) = qryDisabledEra qry where - qryDisabledEra :: forall blk result. SingleEraBlock blk - => BlockQuery blk result -> result -> Encoding + qryDisabledEra :: forall blk fp result. SingleEraBlock blk + => BlockQuery blk fp result -> result -> Encoding qryDisabledEra _ _ = throw $ disabledEraException (Proxy @blk) encodeQueryIfCurrentResult (_ :* cs) (_ :* vs) (QS qry) = encodeQueryIfCurrentResult cs vs qry @@ -350,15 +351,15 @@ decodeQueryIfCurrentResult :: All SerialiseConstraintsHFC xs => NP CodecConfig xs -> NP EraNodeToClientVersion xs - -> QueryIfCurrent xs result - -> Decoder s result + -> QueryIfCurrent xs fp result + -> (forall s. Decoder s result) decodeQueryIfCurrentResult (c :* _) (EraNodeToClientEnabled v :* _) (QZ qry) = - decodeResult c v qry + decodeResult' c v qry decodeQueryIfCurrentResult (_ :* _) (EraNodeToClientDisabled :* _) (QZ qry) = qryDisabledEra qry where - qryDisabledEra :: forall blk result. SingleEraBlock blk - => BlockQuery blk result -> forall s. Decoder s result + qryDisabledEra :: forall blk fp result. SingleEraBlock blk + => BlockQuery blk fp result -> forall s. Decoder s result qryDisabledEra _ = fail . show $ disabledEraException (Proxy @blk) decodeQueryIfCurrentResult (_ :* cs) (_ :* vs) (QS qry) = decodeQueryIfCurrentResult cs vs qry diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State.hs index a4fc808490..babaa31569 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State.hs @@ -34,6 +34,7 @@ import Data.Proxy import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Counting (getExactly) +import Data.SOP.Functors (Flip (..)) import Data.SOP.InPairs (InPairs, Requiring (..)) import qualified Data.SOP.InPairs as InPairs import Data.SOP.Strict @@ -50,7 +51,7 @@ import Ouroboros.Consensus.HardFork.Combinator.State.Types as X import Ouroboros.Consensus.HardFork.Combinator.Translation import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.Ledger.Abstract hiding (getTip) -import Ouroboros.Consensus.Util ((.:)) +import Ouroboros.Consensus.Ledger.Tables.Utils import Prelude hiding (sequence) {------------------------------------------------------------------------------- @@ -106,7 +107,7 @@ recover = mostRecentTransitionInfo :: All SingleEraBlock xs => HardForkLedgerConfig xs - -> HardForkState LedgerState xs + -> HardForkState (Flip LedgerState mk) xs -> TransitionInfo mostRecentTransitionInfo HardForkLedgerConfig{..} st = hcollapse $ @@ -119,19 +120,19 @@ mostRecentTransitionInfo HardForkLedgerConfig{..} st = where cfgs = getPerEraLedgerConfig hardForkLedgerConfigPerEra - getTransition :: SingleEraBlock blk - => WrapPartialLedgerConfig blk - -> K History.EraParams blk - -> Current LedgerState blk - -> K TransitionInfo blk - 
getTransition cfg (K eraParams) Current{..} = K $ - case singleEraTransition' cfg eraParams currentStart currentState of - Nothing -> TransitionUnknown (ledgerTipSlot currentState) + getTransition :: SingleEraBlock blk + => WrapPartialLedgerConfig blk + -> K History.EraParams blk + -> Current (Flip LedgerState mk) blk + -> K TransitionInfo blk + getTransition cfg (K eraParams) Current{currentState = Flip curState, ..} = K $ + case singleEraTransition' cfg eraParams currentStart curState of + Nothing -> TransitionUnknown (ledgerTipSlot curState) Just e -> TransitionKnown e reconstructSummaryLedger :: All SingleEraBlock xs => HardForkLedgerConfig xs - -> HardForkState LedgerState xs + -> HardForkState (Flip LedgerState mk) xs -> History.Summary xs reconstructSummaryLedger cfg@HardForkLedgerConfig{..} st = reconstructSummary @@ -145,7 +146,7 @@ reconstructSummaryLedger cfg@HardForkLedgerConfig{..} st = -- It should not be stored. epochInfoLedger :: All SingleEraBlock xs => HardForkLedgerConfig xs - -> HardForkState LedgerState xs + -> HardForkState (Flip LedgerState mk) xs -> EpochInfo (Except PastHorizonException) epochInfoLedger cfg st = History.summaryToEpochInfo $ @@ -168,24 +169,69 @@ epochInfoPrecomputedTransitionInfo shape transition st = Extending -------------------------------------------------------------------------------} --- | Extend the telescope until the specified slot is within the era at the tip -extendToSlot :: forall xs. CanHardFork xs +-- | Extend the telescope until the specified slot is within the era at the tip. +-- +-- Note that transitioning to a later era might create new values in the ledger +-- tables; therefore the result of this function is a @DiffMK@. +-- +-- If we are crossing no era boundaries, this whole function is a no-op that +-- only creates an empty @DiffMK@, because the @Telescope.extend@ function will +-- do nothing. +-- +-- If we are crossing one era boundary, the ledger tables might be populated +-- with whatever @translateLedgerStateWith@ returns. +-- +-- If we are crossing multiple era boundaries, the diffs generated when crossing +-- an era boundary will be prepended to the ones produced by later era +-- boundaries and, so that they all match the resulting era, they will be +-- translated to later eras. +-- +-- This means in particular that if we extend from @era1@ to @era3@ going +-- through @era2@, we will: +-- +-- 1. Translate the ledger state from @era1@ to @era2@, which produces an @era2@ +-- ledger state together with a set of differences. +-- +-- 2. Keep the @era2@ diffs aside, and translate the @era2@ ledger state without +-- ledger tables, which produces an @era3@ ledger state together with a set of +-- @era3@ differences. +-- +-- 3. Translate the @era2@ diffs to @era3@ differences, and prepend them to the +-- ones created in step 2. +-- +-- 4. Attach the diffs resulting from step 3 to the @era3@ ledger state from +-- step 2, and return it. +extendToSlot :: forall xs. + (CanHardFork xs) => HardForkLedgerConfig xs -> SlotNo - -> HardForkState LedgerState xs -> HardForkState LedgerState xs + -> HardForkState (Flip LedgerState EmptyMK) xs + -> HardForkState (Flip LedgerState DiffMK) xs extendToSlot ledgerCfg@HardForkLedgerConfig{..} slot ledgerSt@(HardForkState st) = - HardForkState . unI + HardForkState + . unI . 
Telescope.extend - ( InPairs.hmap (\f -> Require $ \(K t) - -> Extend $ \cur - -> I $ howExtend f t cur) - $ translate + ( InPairs.hczipWith proxySingle (\f f' -> Require $ \(K t) + -> Extend $ \cur + -> I $ howExtend f f' t cur) + translateLS + translateLT ) (hczipWith proxySingle (fn .: whenExtend) pcfgs (getExactly (History.getShape hardForkLedgerConfigShape))) + -- In order to make this an automorphism, as required by 'Telescope.extend', + -- we have to promote the input to @DiffMK@, albeit an empty one. + $ hcmap + proxySingle + (\c -> c { currentState = Flip + . flip withLedgerTables emptyLedgerTables + . unFlip + . currentState + $ c } + ) $ st where pcfgs = getPerEraLedgerConfig hardForkLedgerConfigPerEra @@ -193,17 +239,17 @@ extendToSlot ledgerCfg@HardForkLedgerConfig{..} slot ledgerSt@(HardForkState st) ei = epochInfoLedger ledgerCfg ledgerSt -- Return the end of this era if we should transition to the next - whenExtend :: SingleEraBlock blk - => WrapPartialLedgerConfig blk - -> K History.EraParams blk - -> Current LedgerState blk - -> (Maybe :.: K History.Bound) blk + whenExtend :: SingleEraBlock blk + => WrapPartialLedgerConfig blk + -> K History.EraParams blk + -> Current (Flip LedgerState DiffMK) blk + -> (Maybe :.: K History.Bound) blk whenExtend pcfg (K eraParams) cur = Comp $ K <$> do transition <- singleEraTransition' pcfg eraParams (currentStart cur) - (currentState cur) + (unFlip $ currentState cur) let endBound = History.mkUpperBound eraParams (currentStart cur) @@ -211,23 +257,44 @@ extendToSlot ledgerCfg@HardForkLedgerConfig{..} slot ledgerSt@(HardForkState st) guard (slot >= History.boundSlot endBound) return endBound - howExtend :: Translate LedgerState blk blk' + howExtend :: (HasLedgerTables (LedgerState blk), HasLedgerTables (LedgerState blk')) + => TranslateLedgerState blk blk' + -> TranslateLedgerTables blk blk' -> History.Bound - -> Current LedgerState blk - -> (K Past blk, Current LedgerState blk') - howExtend f currentEnd cur = ( + -> Current (Flip LedgerState DiffMK) blk + -> (K Past blk, Current (Flip LedgerState DiffMK) blk') + howExtend f f' currentEnd cur = ( K Past { pastStart = currentStart cur , pastEnd = currentEnd } , Current { currentStart = currentEnd - , currentState = translateWith f - (History.boundEpoch currentEnd) - (currentState cur) + , currentState = + Flip + -- We need to bring back the diffs provided by previous + -- translations. Note that if there is only one translation or + -- if the previous translations don't add any new tables, this + -- will just be a no-op. See the haddock for + -- 'translateLedgerTablesWith' and 'extendToSlot' for more + -- information. + . prependDiffs ( translateLedgerTablesWith f' + . projectLedgerTables + . unFlip + . currentState + $ cur + ) + . translateLedgerStateWith f (History.boundEpoch currentEnd) + . forgetLedgerTables + . unFlip + . 
currentState + $ cur } ) - translate :: InPairs (Translate LedgerState) xs - translate = InPairs.requiringBoth cfgs $ + translateLS :: InPairs TranslateLedgerState xs + translateLS = InPairs.requiringBoth cfgs $ translateLedgerState hardForkEraTranslation + + translateLT :: InPairs TranslateLedgerTables xs + translateLT = translateLedgerTables hardForkEraTranslation diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State/Types.hs index 8821d1af08..d3fbceaa1f 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/State/Types.hs @@ -14,9 +14,15 @@ module Ouroboros.Consensus.HardFork.Combinator.State.Types ( , CrossEraForecaster (..) , TransitionInfo (..) , Translate (..) + , TranslateLedgerState (..) + , TranslateLedgerTables (..) + , TranslateTxIn (..) + , TranslateTxOut (..) + , translateLedgerTablesWith ) where import Control.Monad.Except +import qualified Data.Map.Strict as Map import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Strict @@ -27,7 +33,8 @@ import NoThunks.Class (NoThunks (..)) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Forecast import Ouroboros.Consensus.HardFork.History (Bound) -import Prelude +import Ouroboros.Consensus.Ledger.Basics +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff {------------------------------------------------------------------------------- Types -------------------------------------------------------------------------------} @@ -76,7 +83,7 @@ import Prelude -- used in this case. newtype HardForkState f xs = HardForkState { getHardForkState :: Telescope (K Past) (Current f) xs - } + } deriving (Generic) -- | Information about the current era data Current f blk = Current { @@ -125,10 +132,92 @@ newtype CrossEraForecaster state view x y = CrossEraForecaster { crossEraForecastWith :: Bound -- 'Bound' of the transition (start of the new era) -> SlotNo -- 'SlotNo' we're constructing a forecast for - -> state x + -> state x EmptyMK -> Except OutsideForecastRange (view y) } +-- | Translate a 'LedgerState' across an era transition. +newtype TranslateLedgerState x y = TranslateLedgerState { + -- | How to translate a 'LedgerState' during the era transition. + -- + -- When translating between eras, it can be the case that values are modified, + -- thus requiring this to be a @DiffMK@ on the return type. If no tables are + -- populated, normally this will be filled with @emptyLedgerTables@. + -- + -- To give a concrete example, in the context of Cardano, there are currently two + -- cases in which this is of vital importance: Byron->Shelley and + -- Shelley->Allegra. + -- + -- On Byron->Shelley, we basically dump the whole UTxO set as insertions + -- because the LedgerTables only exist for Shelley blocks. + -- + -- On Shelley->Allegra, there were a bunch of UTxOs that were moved around, + -- related to the AVVMs. In particular, they were deleted and included in the + -- reserves. See the code that performs the translation Shelley->Allegra for + -- more information. + translateLedgerStateWith :: + EpochNo + -> LedgerState x EmptyMK + -> LedgerState y DiffMK + } + +-- | Translate a 'LedgerTables' across an era transition. +data TranslateLedgerTables x y = TranslateLedgerTables { + -- | Translate a 'TxIn' across an era transition. + -- + -- See 'translateLedgerTablesWith'. 
+ translateTxInWith :: !(Key (LedgerState x) -> Key (LedgerState y)) + + -- | Translate a 'TxOut' across an era transition. + -- + -- See 'translateLedgerTablesWith'. + , translateTxOutWith :: !(Value (LedgerState x) -> Value (LedgerState y)) + } + +newtype TranslateTxIn x y = TranslateTxIn (Key (LedgerState x) -> Key (LedgerState y)) + +newtype TranslateTxOut x y = TranslateTxOut (Value (LedgerState x) -> Value (LedgerState y)) + +-- | Translate a 'LedgerTables' across an era transition. +-- +-- To translate 'LedgerTable's, it's sufficient to know how to translate +-- 'TxIn's and 'TxOut's. Use 'translateLedgerTablesWith' to translate +-- 'LedgerTable's using 'translateTxInWith' and 'translateTxOutWith'. +-- +-- This is a rather technical subtlety. When performing a ledger state +-- translation, the provided input ledger state will be initially populated with +-- a @emptyLedgerTables@. This step is required so that the operation provided +-- to 'Telescope.extend' is an automorphism. +-- +-- If we only extend by one era, this function is a no-op, as the input will be +-- empty ledger states. However, if we extend across multiple eras, previous +-- eras might populate tables thus creating values that now need to be +-- translated to newer eras. This function fills that hole and allows us to +-- promote tables from one era into tables from the next era. +-- +-- TODO(jdral): this is not optimal. If either 'translateTxInWith' or +-- 'translateTxOutWith' is a no-op ('id'), mapping over the diff with those +-- functions is also equivalent to a no-op. However, we are still traversing the +-- map in both cases. If necessary for performance reasons, this code could be +-- optimised to skip the 'Map.mapKeys' step and/or 'Map.map' step if +-- 'translateTxInWith' and/or 'translateTxOutWith' are no-ops. +translateLedgerTablesWith :: + Ord (Key (LedgerState y)) + => TranslateLedgerTables x y + -> LedgerTables (LedgerState x) DiffMK + -> LedgerTables (LedgerState y) DiffMK +translateLedgerTablesWith f = + LedgerTables + . DiffMK + . Diff.Diff + . Map.mapKeys (translateTxInWith f) + . getDiff + . getDiffMK + . mapMK (translateTxOutWith f) + . getLedgerTables + where + getDiff (Diff.Diff m) = m + -- | Knowledge in a particular era of the transition to the next era data TransitionInfo = -- | No transition is yet known for this era diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Translation.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Translation.hs index 1528b9a578..2825ae0b74 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Translation.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HardFork/Combinator/Translation.hs @@ -1,32 +1,45 @@ {-# LANGUAGE DataKinds #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} module Ouroboros.Consensus.HardFork.Combinator.Translation ( -- * Translate from one era to the next EraTranslation (..) 
+ , ipTranslateTxOut , trivialEraTranslation ) where +import Data.SOP.Constraint import Data.SOP.InPairs (InPairs (..), RequiringBoth (..)) +import qualified Data.SOP.InPairs as InPairs import NoThunks.Class (NoThunks, OnlyCheckWhnfNamed (..)) import Ouroboros.Consensus.HardFork.Combinator.State.Types import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.TypeFamilyWrappers + {------------------------------------------------------------------------------- Translate from one era to the next -------------------------------------------------------------------------------} data EraTranslation xs = EraTranslation { - translateLedgerState :: InPairs (RequiringBoth WrapLedgerConfig (Translate LedgerState)) xs + translateLedgerState :: InPairs (RequiringBoth WrapLedgerConfig TranslateLedgerState ) xs + , translateLedgerTables :: InPairs TranslateLedgerTables xs , translateChainDepState :: InPairs (RequiringBoth WrapConsensusConfig (Translate WrapChainDepState)) xs , crossEraForecast :: InPairs (RequiringBoth WrapLedgerConfig (CrossEraForecaster LedgerState WrapLedgerView)) xs } deriving NoThunks via OnlyCheckWhnfNamed "EraTranslation" (EraTranslation xs) +ipTranslateTxOut :: + All Top xs + => EraTranslation xs + -> InPairs TranslateTxOut xs +ipTranslateTxOut = InPairs.hmap (TranslateTxOut . translateTxOutWith) . translateLedgerTables + trivialEraTranslation :: EraTranslation '[blk] trivialEraTranslation = EraTranslation { translateLedgerState = PNil + , translateLedgerTables = PNil , crossEraForecast = PNil , translateChainDepState = PNil } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HeaderStateHistory.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HeaderStateHistory.hs index 0fc4be977d..2f151d10e6 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HeaderStateHistory.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/HeaderStateHistory.hs @@ -48,6 +48,7 @@ import Ouroboros.Consensus.HeaderValidation hiding (validateHeader) import qualified Ouroboros.Consensus.HeaderValidation as HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables.Utils (applyDiffs) import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Util.CallStack (HasCallStack) import Ouroboros.Network.AnchoredSeq (Anchorable, AnchoredSeq (..)) @@ -191,7 +192,7 @@ mkHeaderStateWithTimeFromSummary summary hst = mkHeaderStateWithTime :: (HasCallStack, HasHardForkHistory blk, HasAnnTip blk) => LedgerConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk mk -> HeaderStateWithTime blk mkHeaderStateWithTime lcfg (ExtLedgerState lst hst) = mkHeaderStateWithTimeFromSummary summary hst @@ -245,7 +246,7 @@ fromChain :: , HasAnnTip blk ) => TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk ValuesMK -- ^ Initial ledger state -> Chain blk -> HeaderStateHistory blk @@ -255,7 +256,7 @@ fromChain cfg initState chain = anchorSnapshot NE.:| snapshots = fmap (mkHeaderStateWithTime (configLedger cfg)) . NE.scanl - (flip (tickThenReapply (ExtLedgerCfg cfg))) + (\st blk -> applyDiffs st $ tickThenReapply (ExtLedgerCfg cfg) blk st) initState . 
Chain.toOldestFirst $ chain diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Abstract.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Abstract.hs index d78c5692d0..5542ef5eff 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Abstract.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Abstract.hs @@ -40,8 +40,9 @@ import Data.Kind (Type) import GHC.Stack (HasCallStack) import Ouroboros.Consensus.Block.Abstract import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Ticked -import Ouroboros.Consensus.Util (repeatedly, repeatedlyM, (..:)) +import Ouroboros.Consensus.Util (repeatedly, repeatedlyM) -- | " Validated " transaction or block -- @@ -77,6 +78,8 @@ class ( IsLedger l , HeaderHash l ~ HeaderHash blk , HasHeader blk , HasHeader (Header blk) + , HasLedgerTables l + , HasLedgerTables (Ticked1 l) ) => ApplyBlock l blk where -- | Apply a block to the ledger state. @@ -87,8 +90,8 @@ class ( IsLedger l HasCallStack => LedgerCfg l -> blk - -> Ticked l - -> Except (LedgerErr l) (LedgerResult l l) + -> Ticked1 l ValuesMK + -> Except (LedgerErr l) (LedgerResult l (l DiffMK)) -- | Re-apply a block to the very same ledger state it was applied in before. -- @@ -103,8 +106,12 @@ class ( IsLedger l HasCallStack => LedgerCfg l -> blk - -> Ticked l - -> LedgerResult l l + -> Ticked1 l ValuesMK + -> LedgerResult l (l DiffMK) + + -- | Given a block, get the key-sets that we need to apply it to a ledger + -- state. + getBlockKeySets :: blk -> LedgerTables l KeysMK -- | Interaction with the ledger layer class ApplyBlock (LedgerState blk) blk => UpdateLedger blk @@ -118,8 +125,8 @@ applyLedgerBlock :: (ApplyBlock l blk, HasCallStack) => LedgerCfg l -> blk - -> Ticked l - -> Except (LedgerErr l) l + -> Ticked1 l ValuesMK + -> Except (LedgerErr l) (l DiffMK) applyLedgerBlock = fmap lrResult ..: applyBlockLedgerResult -- | 'lrResult' after 'reapplyBlockLedgerResult' @@ -127,63 +134,63 @@ reapplyLedgerBlock :: (ApplyBlock l blk, HasCallStack) => LedgerCfg l -> blk - -> Ticked l - -> l + -> Ticked1 l ValuesMK + -> l DiffMK reapplyLedgerBlock = lrResult ..: reapplyBlockLedgerResult tickThenApplyLedgerResult :: ApplyBlock l blk => LedgerCfg l -> blk - -> l - -> Except (LedgerErr l) (LedgerResult l l) + -> l ValuesMK + -> Except (LedgerErr l) (LedgerResult l (l DiffMK)) tickThenApplyLedgerResult cfg blk l = do - let lrTick = applyChainTickLedgerResult cfg (blockSlot blk) l - lrBlock <- applyBlockLedgerResult cfg blk (lrResult lrTick) + let lrTick = applyChainTickLedgerResult cfg (blockSlot blk) (forgetLedgerTables l) + lrBlock <- applyBlockLedgerResult cfg blk (applyDiffForKeys l (getBlockKeySets blk) (lrResult lrTick)) pure LedgerResult { lrEvents = lrEvents lrTick <> lrEvents lrBlock - , lrResult = lrResult lrBlock + , lrResult = prependDiffs (lrResult lrTick) (lrResult lrBlock) } tickThenReapplyLedgerResult :: ApplyBlock l blk => LedgerCfg l -> blk - -> l - -> LedgerResult l l + -> l ValuesMK + -> LedgerResult l (l DiffMK) tickThenReapplyLedgerResult cfg blk l = - let lrTick = applyChainTickLedgerResult cfg (blockSlot blk) l - lrBlock = reapplyBlockLedgerResult cfg blk (lrResult lrTick) + let lrTick = applyChainTickLedgerResult cfg (blockSlot blk) (forgetLedgerTables l) + lrBlock = reapplyBlockLedgerResult cfg blk (applyDiffForKeys l (getBlockKeySets blk) (lrResult lrTick)) in LedgerResult { lrEvents = lrEvents lrTick <> lrEvents 
lrBlock - , lrResult = lrResult lrBlock + , lrResult = prependDiffs (lrResult lrTick) (lrResult lrBlock) } tickThenApply :: ApplyBlock l blk => LedgerCfg l -> blk - -> l - -> Except (LedgerErr l) l + -> l ValuesMK + -> Except (LedgerErr l) (l DiffMK) tickThenApply = fmap lrResult ..: tickThenApplyLedgerResult tickThenReapply :: ApplyBlock l blk => LedgerCfg l -> blk - -> l - -> l + -> l ValuesMK + -> l DiffMK tickThenReapply = lrResult ..: tickThenReapplyLedgerResult foldLedger :: ApplyBlock l blk - => LedgerCfg l -> [blk] -> l -> Except (LedgerErr l) l -foldLedger = repeatedlyM . tickThenApply + => LedgerCfg l -> [blk] -> l ValuesMK -> Except (LedgerErr l) (l ValuesMK) +foldLedger cfg = repeatedlyM (\blk state -> applyDiffForKeys state (getBlockKeySets blk) <$> tickThenApply cfg blk state) refoldLedger :: ApplyBlock l blk - => LedgerCfg l -> [blk] -> l -> l -refoldLedger = repeatedly . tickThenReapply + => LedgerCfg l -> [blk] -> l ValuesMK -> l ValuesMK +refoldLedger cfg = repeatedly (\blk state -> applyDiffForKeys state (getBlockKeySets blk) $ tickThenReapply cfg blk state) {------------------------------------------------------------------------------- Short-hand @@ -191,15 +198,15 @@ refoldLedger = repeatedly . tickThenReapply ledgerTipPoint :: UpdateLedger blk - => LedgerState blk -> Point blk + => LedgerState blk mk -> Point blk ledgerTipPoint = castPoint . getTip ledgerTipHash :: UpdateLedger blk - => LedgerState blk -> ChainHash blk + => LedgerState blk mk -> ChainHash blk ledgerTipHash = pointHash . ledgerTipPoint ledgerTipSlot :: UpdateLedger blk - => LedgerState blk -> WithOrigin SlotNo + => LedgerState blk mk -> WithOrigin SlotNo ledgerTipSlot = pointSlot . ledgerTipPoint diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Basics.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Basics.hs index b7b6eca434..fe209017ab 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Basics.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Basics.hs @@ -1,5 +1,13 @@ +{-# LANGUAGE CPP #-} +#if __GLASGOW_HASKELL__ < 900 +{-# LANGUAGE DataKinds #-} +#endif {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} @@ -8,55 +16,69 @@ -- Normally this is imported from "Ouroboros.Consensus.Ledger.Abstract". We -- pull this out to avoid circular module dependencies. module Ouroboros.Consensus.Ledger.Basics ( - -- * GetTip - GetTip (..) - , getTipHash - , getTipSlot + -- * The 'LedgerState' definition + LedgerCfg + , LedgerState + , TickedLedgerState + -- * Definition of a ledger independent of a choice of block + , IsLedger (..) + , applyChainTick -- * Ledger Events , LedgerResult (..) , VoidLedgerEvent , castLedgerResult , embedLedgerResult , pureLedgerResult - -- * Definition of a ledger independent of a choice of block - , IsLedger (..) - , LedgerCfg - , applyChainTick - -- * Link block to its ledger + -- * GetTip + , GetTip (..) + , GetTipSTM (..) 
+ , getTipHash + , getTipM + , getTipSlot + -- * Associated types by block type , LedgerConfig , LedgerError - , LedgerState - , TickedLedgerState + -- * Re-exports + , module Ouroboros.Consensus.Ledger.Tables ) where -import Data.Kind (Type) -import NoThunks.Class (NoThunks) +import Data.Kind (Constraint, Type) import Ouroboros.Consensus.Block.Abstract +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Ticked -import Ouroboros.Consensus.Util ((..:)) +import Ouroboros.Consensus.Util.IOLike {------------------------------------------------------------------------------- Tip -------------------------------------------------------------------------------} +type GetTip :: LedgerStateKind -> Constraint class GetTip l where -- | Point of the most recently applied block -- -- Should be 'GenesisPoint' when no blocks have been applied yet - getTip :: l -> Point l + getTip :: forall mk. l mk -> Point l -getTipHash :: GetTip l => l -> ChainHash l +getTipHash :: GetTip l => l mk -> ChainHash l getTipHash = pointHash . getTip -getTipSlot :: GetTip l => l -> WithOrigin SlotNo +getTipSlot :: GetTip l => l mk -> WithOrigin SlotNo getTipSlot = pointSlot . getTip +type GetTipSTM :: (Type -> Type) -> Type -> Constraint +class GetTipSTM m l where + getTipSTM :: l -> STM m (Point l) + +getTipM :: (GetTipSTM m l, MonadSTM m) => l -> m (Point l) +getTipM = atomically . getTipSTM + {------------------------------------------------------------------------------- Events directly from the ledger -------------------------------------------------------------------------------} -- | A 'Data.Void.Void' isomorph for explicitly declaring that some ledger has -- no events +type VoidLedgerEvent :: LedgerStateKind -> Type data VoidLedgerEvent l -- | The result of invoke a ledger function that does validation @@ -95,12 +117,14 @@ pureLedgerResult a = LedgerResult { -- | Static environment required for the ledger -- -- Types that inhabit this family will come from the Ledger code. +type LedgerCfg :: LedgerStateKind -> Type type family LedgerCfg l :: Type +type IsLedger :: LedgerStateKind -> Constraint class ( -- Requirements on the ledger state itself - Show l - , Eq l - , NoThunks l + forall mk. EqMK mk => Eq (l mk) + , forall mk. NoThunksMK mk => NoThunks (l mk) + , forall mk. ShowMK mk => Show (l mk) -- Requirements on 'LedgerCfg' , NoThunks (LedgerCfg l) -- Requirements on 'LedgerErr' @@ -111,8 +135,8 @@ class ( -- Requirements on the ledger state itself -- -- See comment for 'applyChainTickLedgerResult' about the tip of the -- ticked ledger. - , GetTip l - , GetTip (Ticked l) + , GetTip l + , GetTip (Ticked1 l) ) => IsLedger l where -- | Errors that can arise when updating the ledger -- @@ -145,23 +169,31 @@ class ( -- Requirements on the ledger state itself -- it would mean a /previous/ block set up the ledger state in such a way -- that as soon as a certain slot was reached, /any/ block would be invalid. -- + -- Ticking a ledger state may not use any data from the 'LedgerTables', + -- however it might produce differences in the tables, in particular because + -- era transitions happen when ticking a ledger state. + -- -- PRECONDITION: The slot number must be strictly greater than the slot at -- the tip of the ledger (except for EBBs, obviously..). -- -- NOTE: 'applyChainTickLedgerResult' should /not/ change the tip of the -- underlying ledger state, which should still refer to the most recent - -- applied /block/. In other words, we should have + -- applied /block/. 
In other words, we should have: -- - -- > ledgerTipPoint (applyChainTick cfg slot st) - -- > == ledgerTipPoint st + -- prop> ledgerTipPoint (applyChainTick cfg slot st) == ledgerTipPoint st applyChainTickLedgerResult :: LedgerCfg l -> SlotNo - -> l - -> LedgerResult l (Ticked l) + -> l EmptyMK + -> LedgerResult l (Ticked1 l DiffMK) -- | 'lrResult' after 'applyChainTickLedgerResult' -applyChainTick :: IsLedger l => LedgerCfg l -> SlotNo -> l -> Ticked l +applyChainTick :: + IsLedger l + => LedgerCfg l + -> SlotNo + -> l EmptyMK + -> Ticked1 l DiffMK applyChainTick = lrResult ..: applyChainTickLedgerResult {------------------------------------------------------------------------------- @@ -170,20 +202,27 @@ applyChainTick = lrResult ..: applyChainTickLedgerResult -- | Ledger state associated with a block -- --- This is the Consensus notion of a /ledger state/. Each block type is +-- This is the Consensus notion of a Ledger /ledger state/. Each block type is -- associated with one of the Ledger types for the /ledger state/. Virtually -- every concept in this codebase revolves around this type, or the referenced --- @blk@. Whenever we use the type variable @l@, we intend to denote that the +-- @blk@. Whenever we use the type variable @l@ we intend to signal that the -- expected instantiation is either a 'LedgerState' or some wrapper over it -- (like the 'Ouroboros.Consensus.Ledger.Extended.ExtLedgerState'). -- +-- This type is parametrized over @mk :: 'MapKind'@ to express the +-- 'LedgerTables' contained in such a 'LedgerState'. See 'LedgerTables' for a +-- more thorough description. +-- -- The main operations we can do with a 'LedgerState' are /ticking/ (defined in -- 'IsLedger'), and /applying a block/ (defined in -- 'Ouroboros.Consensus.Ledger.Abstract.ApplyBlock'). -data family LedgerState blk :: Type +type LedgerState :: Type -> LedgerStateKind +data family LedgerState blk mk +type TickedLedgerState blk = Ticked1 (LedgerState blk) type instance HeaderHash (LedgerState blk) = HeaderHash blk -type LedgerConfig blk = LedgerCfg (LedgerState blk) -type LedgerError blk = LedgerErr (LedgerState blk) -type TickedLedgerState blk = Ticked (LedgerState blk) +instance StandardHash blk => StandardHash (LedgerState blk) + +type LedgerConfig blk = LedgerCfg (LedgerState blk) +type LedgerError blk = LedgerErr (LedgerState blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/CommonProtocolParams.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/CommonProtocolParams.hs index 4e0c2e6518..3d5c616444 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/CommonProtocolParams.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/CommonProtocolParams.hs @@ -8,8 +8,8 @@ class UpdateLedger blk => CommonProtocolParams blk where -- | The maximum header size in bytes according to the currently adopted -- protocol parameters of the ledger state. - maxHeaderSize :: LedgerState blk -> Word32 + maxHeaderSize :: LedgerState blk mk -> Word32 -- | The maximum transaction size in bytes according to the currently -- adopted protocol parameters of the ledger state. 
- maxTxSize :: LedgerState blk -> Word32 + maxTxSize :: LedgerState blk mk -> Word32 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Dual.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Dual.hs index a5dc517634..51236c1631 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Dual.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Dual.hs @@ -1,4 +1,8 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE ConstraintKinds #-} +#if __GLASGOW_HASKELL__ < 900 +{-# LANGUAGE DataKinds #-} +#endif {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE EmptyCase #-} @@ -36,9 +40,10 @@ module Ouroboros.Consensus.Ledger.Dual ( , GenTx (..) , Header (..) , LedgerState (..) + , LedgerTables (..) , NestedCtxt_ (..) , StorageConfig (..) - , Ticked (..) + , Ticked1 (..) , TxId (..) , Validated (..) -- * Serialisation @@ -83,6 +88,7 @@ import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsPeerSelection import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.Util (ShowProxy (..)) import Ouroboros.Consensus.Util.Condense @@ -330,12 +336,15 @@ type instance LedgerCfg (LedgerState (DualBlock m a)) = DualLedgerConfig m a instance Bridge m a => GetTip (LedgerState (DualBlock m a)) where getTip = castPoint . getTip . dualLedgerStateMain -instance Bridge m a => GetTip (Ticked (LedgerState (DualBlock m a))) where +instance Bridge m a => GetTip (Ticked1 (LedgerState (DualBlock m a))) where getTip = castPoint . getTip . tickedDualLedgerStateMain -data instance Ticked (LedgerState (DualBlock m a)) = TickedDualLedgerState { - tickedDualLedgerStateMain :: Ticked (LedgerState m) - , tickedDualLedgerStateAux :: Ticked (LedgerState a) +-- We only have tables on the main ledger state to be able to compare it to a +-- reference spec implementation which doesn't use tables. The result should be +-- the same. +data instance Ticked1 (LedgerState (DualBlock m a)) mk = TickedDualLedgerState { + tickedDualLedgerStateMain :: Ticked1 (LedgerState m) mk + , tickedDualLedgerStateAux :: Ticked1 (LedgerState a) ValuesMK , tickedDualLedgerStateBridge :: BridgeLedger m a -- | The original, unticked ledger for the auxiliary block @@ -343,9 +352,9 @@ data instance Ticked (LedgerState (DualBlock m a)) = TickedDualLedgerState { -- The reason we keep this in addition to the ticked ledger state is that -- not every main block is paired with an auxiliary block. When there is -- no auxiliary block, the auxiliary ledger state remains unchanged. 
- , tickedDualLedgerStateAuxOrig :: LedgerState a + , tickedDualLedgerStateAuxOrig :: LedgerState a ValuesMK } - deriving NoThunks via AllowThunk (Ticked (LedgerState (DualBlock m a))) + deriving NoThunks via AllowThunk (Ticked1 (LedgerState (DualBlock m a)) mk) instance Bridge m a => IsLedger (LedgerState (DualBlock m a)) where type LedgerErr (LedgerState (DualBlock m a)) = DualLedgerError m a @@ -364,14 +373,15 @@ instance Bridge m a => IsLedger (LedgerState (DualBlock m a)) where DualLedgerState{..} = castLedgerResult ledgerResult <&> \main -> TickedDualLedgerState { tickedDualLedgerStateMain = main - , tickedDualLedgerStateAux = applyChainTick - dualLedgerConfigAux - slot - dualLedgerStateAux + , tickedDualLedgerStateAux = applyDiffs dualLedgerStateAux dualLedger , tickedDualLedgerStateAuxOrig = dualLedgerStateAux , tickedDualLedgerStateBridge = dualLedgerStateBridge } where + dualLedger = applyChainTick + dualLedgerConfigAux + slot + (forgetLedgerTables dualLedgerStateAux) ledgerResult = applyChainTickLedgerResult dualLedgerConfigMain slot @@ -392,11 +402,11 @@ instance Bridge m a => ApplyBlock (LedgerState (DualBlock m a)) (DualBlock m a) (dualLedgerConfigAux cfg) dualBlockAux tickedDualLedgerStateAux - tickedDualLedgerStateAuxOrig + (forgetLedgerTables tickedDualLedgerStateAuxOrig) ) return $ castLedgerResult ledgerResult <&> \main' -> DualLedgerState { dualLedgerStateMain = main' - , dualLedgerStateAux = aux' + , dualLedgerStateAux = applyDiffs tickedDualLedgerStateAux aux' , dualLedgerStateBridge = updateBridgeWithBlock block tickedDualLedgerStateBridge @@ -407,34 +417,39 @@ instance Bridge m a => ApplyBlock (LedgerState (DualBlock m a)) (DualBlock m a) TickedDualLedgerState{..} = castLedgerResult ledgerResult <&> \main' -> DualLedgerState { dualLedgerStateMain = main' - , dualLedgerStateAux = reapplyMaybeBlock - (dualLedgerConfigAux cfg) - dualBlockAux - tickedDualLedgerStateAux - tickedDualLedgerStateAuxOrig + , dualLedgerStateAux = applyDiffs tickedDualLedgerStateAux auxLedger , dualLedgerStateBridge = updateBridgeWithBlock block tickedDualLedgerStateBridge } - where + where + auxLedger = reapplyMaybeBlock + (dualLedgerConfigAux cfg) + dualBlockAux + tickedDualLedgerStateAux + (forgetLedgerTables tickedDualLedgerStateAuxOrig) ledgerResult = reapplyBlockLedgerResult (dualLedgerConfigMain cfg) dualBlockMain tickedDualLedgerStateMain -data instance LedgerState (DualBlock m a) = DualLedgerState { - dualLedgerStateMain :: LedgerState m - , dualLedgerStateAux :: LedgerState a + getBlockKeySets = castLedgerTables + . getBlockKeySets @(LedgerState m) + . 
dualBlockMain + +data instance LedgerState (DualBlock m a) mk = DualLedgerState { + dualLedgerStateMain :: LedgerState m mk + , dualLedgerStateAux :: LedgerState a ValuesMK , dualLedgerStateBridge :: BridgeLedger m a } - deriving NoThunks via AllowThunk (LedgerState (DualBlock m a)) + deriving NoThunks via AllowThunk (LedgerState (DualBlock m a) mk) instance Bridge m a => UpdateLedger (DualBlock m a) -deriving instance ( Bridge m a - ) => Show (LedgerState (DualBlock m a)) -deriving instance ( Bridge m a - ) => Eq (LedgerState (DualBlock m a)) +deriving instance ( Bridge m a, ShowMK mk + ) => Show (LedgerState (DualBlock m a) mk) +deriving instance ( Bridge m a, EqMK mk + ) => Eq (LedgerState (DualBlock m a) mk) {------------------------------------------------------------------------------- Utilities for working with the extended ledger state @@ -496,7 +511,7 @@ instance Bridge m a => HasHardForkHistory (DualBlock m a) where Querying the ledger -------------------------------------------------------------------------------} -data instance BlockQuery (DualBlock m a) result +data instance BlockQuery (DualBlock m a) footprint result deriving (Show) instance (Typeable m, Typeable a) @@ -504,12 +519,14 @@ instance (Typeable m, Typeable a) -- | Not used in the tests: no constructors instance Bridge m a => BlockSupportsLedgerQuery (DualBlock m a) where - answerBlockQuery _ = \case {} + answerPureBlockQuery _ = \case {} + answerBlockQueryLookup _ = \case {} + answerBlockQueryTraverse _ = \case {} -instance SameDepIndex (BlockQuery (DualBlock m a)) where - sameDepIndex = \case {} +instance SameDepIndex2 (BlockQuery (DualBlock m a)) where + sameDepIndex2 = \case {} -instance ShowQuery (BlockQuery (DualBlock m a)) where +instance ShowQuery (BlockQuery (DualBlock m a) footprint) where showResult = \case {} -- | Forward to the main ledger @@ -574,14 +591,15 @@ instance Bridge m a => LedgerSupportsMempool (DualBlock m a) where , vDualGenTxAux = auxVtx , vDualGenTxBridge = dualGenTxBridge } - return $ flip (,) vtx $ TickedDualLedgerState { + return (TickedDualLedgerState { tickedDualLedgerStateMain = main' - , tickedDualLedgerStateAux = aux' + , tickedDualLedgerStateAux = applyDiffs tickedDualLedgerStateAux aux' , tickedDualLedgerStateAuxOrig = tickedDualLedgerStateAuxOrig , tickedDualLedgerStateBridge = updateBridgeWithTx vtx tickedDualLedgerStateBridge - } + }, vtx) + reapplyTx DualLedgerConfig{..} slot @@ -622,6 +640,10 @@ instance Bridge m a => LedgerSupportsMempool (DualBlock m a) where , vDualGenTxBridge } = vtx + getTransactionKeySets = castLedgerTables + . getTransactionKeySets @m + . 
dualGenTxMain + instance Bridge m a => TxLimits (DualBlock m a) where type TxMeasure (DualBlock m a) = TxMeasure m @@ -768,10 +790,10 @@ type instance ForgeStateUpdateError (DualBlock m a) = ForgeStateUpdateError m applyMaybeBlock :: UpdateLedger blk => LedgerConfig blk -> Maybe blk - -> TickedLedgerState blk - -> LedgerState blk - -> Except (LedgerError blk) (LedgerState blk) -applyMaybeBlock _ Nothing _ st = return st + -> TickedLedgerState blk ValuesMK + -> LedgerState blk EmptyMK + -> Except (LedgerError blk) (LedgerState blk DiffMK) +applyMaybeBlock _ Nothing _ st = return $ st `withLedgerTables` emptyLedgerTables applyMaybeBlock cfg (Just block) tst _ = applyLedgerBlock cfg block tst -- | Lift 'reapplyLedgerBlock' to @Maybe blk@ @@ -780,10 +802,10 @@ applyMaybeBlock cfg (Just block) tst _ = applyLedgerBlock cfg block tst reapplyMaybeBlock :: UpdateLedger blk => LedgerConfig blk -> Maybe blk - -> TickedLedgerState blk - -> LedgerState blk - -> LedgerState blk -reapplyMaybeBlock _ Nothing _ st = st + -> TickedLedgerState blk ValuesMK + -> LedgerState blk EmptyMK + -> LedgerState blk DiffMK +reapplyMaybeBlock _ Nothing _ st = st `withLedgerTables` emptyLedgerTables reapplyMaybeBlock cfg (Just block) tst _ = reapplyLedgerBlock cfg block tst -- | Used when the concrete and abstract implementation should agree on errors @@ -896,9 +918,9 @@ decodeDualGenTxErr decodeMain = do <$> decodeMain <*> decode -encodeDualLedgerState :: (Bridge m a, Serialise (LedgerState a)) - => (LedgerState m -> Encoding) - -> LedgerState (DualBlock m a) -> Encoding +encodeDualLedgerState :: (Bridge m a, Serialise (LedgerState a ValuesMK)) + => (LedgerState m mk -> Encoding) + -> LedgerState (DualBlock m a) mk -> Encoding encodeDualLedgerState encodeMain DualLedgerState{..} = mconcat [ encodeListLen 3 , encodeMain dualLedgerStateMain @@ -906,12 +928,98 @@ encodeDualLedgerState encodeMain DualLedgerState{..} = mconcat [ , encode dualLedgerStateBridge ] -decodeDualLedgerState :: (Bridge m a, Serialise (LedgerState a)) - => Decoder s (LedgerState m) - -> Decoder s (LedgerState (DualBlock m a)) +decodeDualLedgerState :: (Bridge m a, Serialise (LedgerState a ValuesMK)) + => Decoder s (LedgerState m mk) + -> Decoder s (LedgerState (DualBlock m a) mk) decodeDualLedgerState decodeMain = do enforceSize "DualLedgerState" 3 DualLedgerState <$> decodeMain <*> decode <*> decode + +{------------------------------------------------------------------------------- + Ledger Tables +-------------------------------------------------------------------------------} + +type instance Key (LedgerState (DualBlock m a)) = Key (LedgerState m) +type instance Value (LedgerState (DualBlock m a)) = Value (LedgerState m) + +instance ( + Bridge m a +#if __GLASGOW_HASKELL__ >= 906 + , NoThunks (Value (LedgerState m)) + , NoThunks (Key (LedgerState m)) + , Show (Value (LedgerState m)) + , Show (Key (LedgerState m)) + , Eq (Value (LedgerState m)) + , Ord (Key (LedgerState m)) +#endif + ) => HasLedgerTables (LedgerState (DualBlock m a)) where + projectLedgerTables DualLedgerState{..} = + castLedgerTables + (projectLedgerTables dualLedgerStateMain) + + withLedgerTables DualLedgerState{..} main = + DualLedgerState { + dualLedgerStateMain = withLedgerTables dualLedgerStateMain + $ castLedgerTables main + , dualLedgerStateAux = dualLedgerStateAux + , dualLedgerStateBridge = dualLedgerStateBridge + } + +instance ( + Bridge m a +#if __GLASGOW_HASKELL__ >= 906 + , NoThunks (Value (LedgerState m)) + , NoThunks (Key (LedgerState m)) + , Show (Value (LedgerState 
m)) + , Show (Key (LedgerState m)) + , Eq (Value (LedgerState m)) + , Ord (Key (LedgerState m)) +#endif + )=> HasLedgerTables (Ticked1 (LedgerState (DualBlock m a))) where + projectLedgerTables TickedDualLedgerState{..} = + castLedgerTables + (projectLedgerTables tickedDualLedgerStateMain) + + withLedgerTables + TickedDualLedgerState{..} + main = + TickedDualLedgerState { + tickedDualLedgerStateMain = + withLedgerTables tickedDualLedgerStateMain $ castLedgerTables main + , tickedDualLedgerStateAux + , tickedDualLedgerStateBridge + , tickedDualLedgerStateAuxOrig + } + +instance CanSerializeLedgerTables (LedgerState m) + => CanSerializeLedgerTables (LedgerState (DualBlock m a)) where + codecLedgerTables = castLedgerTables $ codecLedgerTables @(LedgerState m) + +instance CanStowLedgerTables (LedgerState m) + => CanStowLedgerTables (LedgerState (DualBlock m a)) where + stowLedgerTables dls = + DualLedgerState{ + dualLedgerStateMain = stowLedgerTables dualLedgerStateMain + , dualLedgerStateAux + , dualLedgerStateBridge + } + where + DualLedgerState { dualLedgerStateMain + , dualLedgerStateAux + , dualLedgerStateBridge + } = dls + + unstowLedgerTables dls = + DualLedgerState{ + dualLedgerStateMain = unstowLedgerTables dualLedgerStateMain + , dualLedgerStateAux + , dualLedgerStateBridge + } + where + DualLedgerState { dualLedgerStateMain + , dualLedgerStateAux + , dualLedgerStateBridge + } = dls diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Extended.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Extended.hs index 522e2e2b51..6dd00dfe99 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Extended.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Extended.hs @@ -1,8 +1,12 @@ +{- HLINT ignore "Unused LANGUAGE pragma" -} -- False hint on TypeOperators + {-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -22,16 +26,14 @@ module Ouroboros.Consensus.Ledger.Extended ( , decodeExtLedgerState , encodeDiskExtLedgerState , encodeExtLedgerState - -- * Casts - , castExtLedgerState -- * Type family instances - , Ticked (..) + , LedgerTables (..) + , Ticked1 (..) 
) where import Codec.CBOR.Decoding (Decoder, decodeListLenOf) import Codec.CBOR.Encoding (Encoding, encodeListLen) import Control.Monad.Except -import Data.Coerce import Data.Functor ((<&>)) import Data.Proxy import Data.Typeable @@ -44,51 +46,61 @@ import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.Serialisation +import Ouroboros.Consensus.Ticked {------------------------------------------------------------------------------- Extended ledger state -------------------------------------------------------------------------------} +data ExtValidationError blk = + ExtValidationErrorLedger !(LedgerError blk) + | ExtValidationErrorHeader !(HeaderError blk) + deriving (Generic) + +deriving instance LedgerSupportsProtocol blk => Eq (ExtValidationError blk) +deriving instance LedgerSupportsProtocol blk => NoThunks (ExtValidationError blk) +deriving instance LedgerSupportsProtocol blk => Show (ExtValidationError blk) + -- | Extended ledger state -- -- This is the combination of the header state and the ledger state proper. -data ExtLedgerState blk = ExtLedgerState { - ledgerState :: !(LedgerState blk) +data ExtLedgerState blk mk = ExtLedgerState { + ledgerState :: !(LedgerState blk mk) , headerState :: !(HeaderState blk) } deriving (Generic) -data ExtValidationError blk = - ExtValidationErrorLedger !(LedgerError blk) - | ExtValidationErrorHeader !(HeaderError blk) - deriving (Generic) - -instance LedgerSupportsProtocol blk => NoThunks (ExtValidationError blk) - -deriving instance LedgerSupportsProtocol blk => Show (ExtLedgerState blk) -deriving instance LedgerSupportsProtocol blk => Show (ExtValidationError blk) -deriving instance LedgerSupportsProtocol blk => Eq (ExtValidationError blk) +deriving instance (EqMK mk, LedgerSupportsProtocol blk) + => Eq (ExtLedgerState blk mk) +deriving instance (ShowMK mk, LedgerSupportsProtocol blk) + => Show (ExtLedgerState blk mk) -- | We override 'showTypeOf' to show the type of the block -- -- This makes debugging a bit easier, as the block gets used to resolve all -- kinds of type families. -instance LedgerSupportsProtocol blk => NoThunks (ExtLedgerState blk) where +instance (NoThunksMK mk, LedgerSupportsProtocol blk) + => NoThunks (ExtLedgerState blk mk) where showTypeOf _ = show $ typeRep (Proxy @(ExtLedgerState blk)) -deriving instance ( LedgerSupportsProtocol blk - ) => Eq (ExtLedgerState blk) +type instance HeaderHash (ExtLedgerState blk) = HeaderHash (LedgerState blk) +instance ( + NoThunks (HeaderHash blk) + , Typeable (HeaderHash blk) + , Show (HeaderHash blk) + , Ord (HeaderHash blk) +#if __GLASGOW_HASKELL__ >= 906 + , Eq (HeaderHash blk) +#endif + ) => StandardHash (ExtLedgerState blk) + +instance IsLedger (LedgerState blk) => GetTip (ExtLedgerState blk) where + getTip = castPoint . getTip . 
ledgerState {------------------------------------------------------------------------------- - The extended ledger can behave like a ledger + The extended ledger configuration -------------------------------------------------------------------------------} -data instance Ticked (ExtLedgerState blk) = TickedExtLedgerState { - tickedLedgerState :: Ticked (LedgerState blk) - , ledgerView :: LedgerView (BlockProtocol blk) - , tickedHeaderState :: Ticked (HeaderState blk) - } - -- | " Ledger " configuration for the extended ledger -- -- Since the extended ledger also does the consensus protocol validation, we @@ -108,17 +120,24 @@ instance ( ConsensusProtocol (BlockProtocol blk) type instance LedgerCfg (ExtLedgerState blk) = ExtLedgerCfg blk -type instance HeaderHash (ExtLedgerState blk) = HeaderHash (LedgerState blk) +{------------------------------------------------------------------------------- + The ticked extended ledger state +-------------------------------------------------------------------------------} -instance IsLedger (LedgerState blk) => GetTip (ExtLedgerState blk) where - getTip = castPoint . getTip . ledgerState +data instance Ticked1 (ExtLedgerState blk) mk = TickedExtLedgerState { + tickedLedgerState :: Ticked1 (LedgerState blk) mk + , ledgerView :: LedgerView (BlockProtocol blk) + , tickedHeaderState :: Ticked (HeaderState blk) + } -instance IsLedger (LedgerState blk) => GetTip (Ticked (ExtLedgerState blk)) where +instance IsLedger (LedgerState blk) => GetTip (Ticked1 (ExtLedgerState blk)) where getTip = castPoint . getTip . tickedLedgerState -instance ( LedgerSupportsProtocol blk - ) - => IsLedger (ExtLedgerState blk) where +{------------------------------------------------------------------------------- + Ledger interface +-------------------------------------------------------------------------------} + +instance LedgerSupportsProtocol blk => IsLedger (ExtLedgerState blk) where type LedgerErr (ExtLedgerState blk) = ExtValidationError blk type AuxLedgerEvent (ExtLedgerState blk) = AuxLedgerEvent (LedgerState blk) @@ -174,18 +193,20 @@ instance LedgerSupportsProtocol blk => ApplyBlock (ExtLedgerState blk) blk where (getHeader blk) tickedHeaderState + getBlockKeySets = castLedgerTables . getBlockKeySets @(LedgerState blk) + {------------------------------------------------------------------------------- Serialisation -------------------------------------------------------------------------------} -encodeExtLedgerState :: (LedgerState blk -> Encoding) +encodeExtLedgerState :: (LedgerState blk mk -> Encoding) -> (ChainDepState (BlockProtocol blk) -> Encoding) -> (AnnTip blk -> Encoding) - -> ExtLedgerState blk -> Encoding + -> ExtLedgerState blk mk -> Encoding encodeExtLedgerState encodeLedgerState encodeChainDepState encodeAnnTip - ExtLedgerState{..} = mconcat [ + ExtLedgerState{ledgerState, headerState} = mconcat [ encodeListLen 2 , encodeLedgerState ledgerState , encodeHeaderState' headerState @@ -197,28 +218,28 @@ encodeExtLedgerState encodeLedgerState encodeDiskExtLedgerState :: forall blk. - (EncodeDisk blk (LedgerState blk), + (EncodeDisk blk (LedgerState blk EmptyMK), EncodeDisk blk (ChainDepState (BlockProtocol blk)), EncodeDisk blk (AnnTip blk) ) - => (CodecConfig blk -> ExtLedgerState blk -> Encoding) + => (CodecConfig blk -> ExtLedgerState blk EmptyMK -> Encoding) encodeDiskExtLedgerState cfg = encodeExtLedgerState (encodeDisk cfg) (encodeDisk cfg) (encodeDisk cfg) -decodeExtLedgerState :: (forall s. Decoder s (LedgerState blk)) +decodeExtLedgerState :: (forall s. 
Decoder s (LedgerState blk EmptyMK)) -> (forall s. Decoder s (ChainDepState (BlockProtocol blk))) -> (forall s. Decoder s (AnnTip blk)) - -> (forall s. Decoder s (ExtLedgerState blk)) + -> (forall s. Decoder s (ExtLedgerState blk EmptyMK)) decodeExtLedgerState decodeLedgerState decodeChainDepState decodeAnnTip = do decodeListLenOf 2 ledgerState <- decodeLedgerState headerState <- decodeHeaderState' - return ExtLedgerState{..} + return ExtLedgerState{ledgerState, headerState} where decodeHeaderState' = decodeHeaderState decodeChainDepState @@ -226,11 +247,11 @@ decodeExtLedgerState decodeLedgerState decodeDiskExtLedgerState :: forall blk. - (DecodeDisk blk (LedgerState blk), + (DecodeDisk blk (LedgerState blk EmptyMK), DecodeDisk blk (ChainDepState (BlockProtocol blk)), DecodeDisk blk (AnnTip blk) ) - => (CodecConfig blk -> forall s. Decoder s (ExtLedgerState blk)) + => (CodecConfig blk -> forall s. Decoder s (ExtLedgerState blk EmptyMK)) decodeDiskExtLedgerState cfg = decodeExtLedgerState (decodeDisk cfg) @@ -238,18 +259,68 @@ decodeDiskExtLedgerState cfg = (decodeDisk cfg) {------------------------------------------------------------------------------- - Casts + Ledger Tables -------------------------------------------------------------------------------} -castExtLedgerState :: - ( Coercible (LedgerState blk) - (LedgerState blk') - , Coercible (ChainDepState (BlockProtocol blk)) - (ChainDepState (BlockProtocol blk')) - , TipInfo blk ~ TipInfo blk' - ) - => ExtLedgerState blk -> ExtLedgerState blk' -castExtLedgerState ExtLedgerState{..} = ExtLedgerState { - ledgerState = coerce ledgerState - , headerState = castHeaderState headerState - } +type instance Key (ExtLedgerState blk) = Key (LedgerState blk) +type instance Value (ExtLedgerState blk) = Value (LedgerState blk) + +instance ( + HasLedgerTables (LedgerState blk) +#if __GLASGOW_HASKELL__ >= 906 + , NoThunks (Value (LedgerState blk)) + , NoThunks (Key (LedgerState blk)) + , Show (Value (LedgerState blk)) + , Show (Key (LedgerState blk)) + , Eq (Value (LedgerState blk)) + , Ord (Key (LedgerState blk)) +#endif + ) => HasLedgerTables (ExtLedgerState blk) where + projectLedgerTables (ExtLedgerState lstate _) = + castLedgerTables (projectLedgerTables lstate) + withLedgerTables (ExtLedgerState lstate hstate) tables = + ExtLedgerState + (lstate `withLedgerTables` castLedgerTables tables) + hstate + +instance CanSerializeLedgerTables (LedgerState blk) + => CanSerializeLedgerTables (ExtLedgerState blk) where + codecLedgerTables = castLedgerTables $ codecLedgerTables @(LedgerState blk) + +instance LedgerTablesAreTrivial (LedgerState blk) + => LedgerTablesAreTrivial (ExtLedgerState blk) where + convertMapKind (ExtLedgerState x y) = ExtLedgerState (convertMapKind x) y + +instance LedgerTablesAreTrivial (Ticked1 (LedgerState blk)) + => LedgerTablesAreTrivial (Ticked1 (ExtLedgerState blk)) where + convertMapKind (TickedExtLedgerState x y z) = + TickedExtLedgerState (convertMapKind x) y z + +instance ( + HasLedgerTables (Ticked1 (LedgerState blk)) +#if __GLASGOW_HASKELL__ >= 906 + , NoThunks (Value (LedgerState blk)) + , NoThunks (Key (LedgerState blk)) + , Show (Value (LedgerState blk)) + , Show (Key (LedgerState blk)) + , Eq (Value (LedgerState blk)) + , Ord (Key (LedgerState blk)) +#endif + ) => HasLedgerTables (Ticked1 (ExtLedgerState blk)) where + projectLedgerTables (TickedExtLedgerState lstate _view _hstate) = + castLedgerTables (projectLedgerTables lstate) + withLedgerTables + (TickedExtLedgerState lstate view hstate) + tables = + 
TickedExtLedgerState + (lstate `withLedgerTables` castLedgerTables tables) + view + hstate + +instance CanStowLedgerTables (LedgerState blk) + => CanStowLedgerTables (ExtLedgerState blk) where + stowLedgerTables (ExtLedgerState lstate hstate) = + ExtLedgerState (stowLedgerTables lstate) hstate + + unstowLedgerTables (ExtLedgerState lstate hstate) = + ExtLedgerState (unstowLedgerTables lstate) hstate diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Inspect.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Inspect.hs index e0f2d9fe5c..67ced8c8af 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Inspect.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Inspect.hs @@ -66,8 +66,8 @@ class ( Show (LedgerWarning blk) -- leaving it at this for now. inspectLedger :: TopLevelConfig blk - -> LedgerState blk -- ^ Before - -> LedgerState blk -- ^ After + -> LedgerState blk mk1 -- ^ Before + -> LedgerState blk mk2 -- ^ After -> [LedgerEvent blk] -- Defaults @@ -81,8 +81,8 @@ class ( Show (LedgerWarning blk) , LedgerUpdate blk ~ Void ) => TopLevelConfig blk - -> LedgerState blk -- ^ Before - -> LedgerState blk -- ^ After + -> LedgerState blk mk1 -- ^ Before + -> LedgerState blk mk2 -- ^ After -> [LedgerEvent blk] inspectLedger _ _ _ = [] where diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Query.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Query.hs index 028adc66d5..e9cf580613 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Query.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Query.hs @@ -1,26 +1,40 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE QuantifiedConstraints #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} module Ouroboros.Consensus.Ledger.Query ( - BlockQuery + -- * Queries that can be answered by the Consensus layer + Query (..) + , answerQuery + -- * How to answer specific queries + , BlockQuery , BlockSupportsLedgerQuery (..) , ConfigSupportsNode (..) - , Query (..) - , QueryVersion (..) , ShowQuery (..) - , answerQuery + -- * Version + , QueryVersion (..) , nodeToClientVersionToQueryVersion + -- * Serialization , queryDecodeNodeToClient , queryEncodeNodeToClient + -- * Footprints + , QueryFootprint (..) + , SQueryFootprint (..) + , SomeBlockQuery (..) 
) where import Cardano.Binary (FromCBOR (..), ToCBOR (..)) @@ -30,28 +44,132 @@ import Codec.CBOR.Decoding import Codec.CBOR.Encoding import Codec.Serialise (Serialise) import Codec.Serialise.Class (decode, encode) -import Control.Exception (Exception, throw) +import Control.Exception (throw) import Data.Kind (Type) import Data.Maybe (isJust) -import Data.Typeable (Typeable) +import Data.Singletons +import Data.SOP.BasicFunctors import Ouroboros.Consensus.Block.Abstract (CodecConfig) import Ouroboros.Consensus.BlockchainTime (SystemStart) import Ouroboros.Consensus.Config import Ouroboros.Consensus.Config.SupportsNode import Ouroboros.Consensus.HeaderValidation (HasAnnTip (..), headerStateBlockNo, headerStatePoint) +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Query.Version import Ouroboros.Consensus.Node.NetworkProtocolVersion (BlockNodeToClientVersion) import Ouroboros.Consensus.Node.Serialisation - (SerialiseNodeToClient (..), SerialiseResult (..)) + (SerialiseNodeToClient (..), SerialiseResult (..), + SerialiseResult' (..)) +import Ouroboros.Consensus.Storage.LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import Ouroboros.Consensus.Util (ShowProxy (..), SomeSecond (..)) import Ouroboros.Consensus.Util.DepPair +import Ouroboros.Consensus.Util.IOLike import Ouroboros.Network.Block (HeaderHash, Point (..), StandardHash, decodePoint, encodePoint) import Ouroboros.Network.Protocol.LocalStateQuery.Type - (ShowQuery (..)) + +{------------------------------------------------------------------------------- + Footprints +-------------------------------------------------------------------------------} + +-- | Queries on the local state might require reading ledger tables from disk. +-- This datatype (which will sometimes be concretized via @sing@) allows +-- Consensus to categorize the queries. +data QueryFootprint = + -- | The query doesn't need ledger tables, thus can be answered only with + -- the ledger state. + QFNoTables + -- | The query needs some tables, but doesn't need to traverse the whole + -- backing store. + | QFLookupTables + -- | The query needs to traverse the whole backing store. + | QFTraverseTables + +type instance Sing = SQueryFootprint + +type SQueryFootprint :: QueryFootprint -> Type +data SQueryFootprint a where + SQFNoTables :: SQueryFootprint QFNoTables + SQFLookupTables :: SQueryFootprint QFLookupTables + SQFTraverseTables :: SQueryFootprint QFTraverseTables + +instance SingI QFNoTables where + sing = SQFNoTables +instance SingI QFLookupTables where + sing = SQFLookupTables +instance SingI QFTraverseTables where + sing = SQFTraverseTables + +type SomeBlockQuery :: (QueryFootprint -> Type -> Type) -> Type +data SomeBlockQuery q = + forall footprint result. SingI footprint => SomeBlockQuery (q footprint result) + +{------------------------------------------------------------------------------- + Block Queries +-------------------------------------------------------------------------------} + +-- | Different queries supported by the ledger, indexed by the result type. +type BlockQuery :: Type -> QueryFootprint -> Type -> Type +data family BlockQuery + +-- | Query the ledger extended state. +-- +-- Used by the LocalStateQuery protocol to allow clients to query the extended +-- ledger state. 
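-- [Editorial sketch, not part of this patch] An illustration of how a block
-- could instantiate the 'BlockQuery' data family declared above, indexing each
-- constructor by its 'QueryFootprint'. The names @ToyBlock@, @ToyTxIn@ and
-- @ToyTxOut@ are hypothetical; only the footprint indices come from this
-- module: a parameter-style query needs no tables, a point lookup needs
-- 'QFLookupTables', and dumping the whole UTxO set needs 'QFTraverseTables'.
--
-- @
-- data instance BlockQuery ToyBlock fp result where
--   GetToyEpochNo    :: BlockQuery ToyBlock QFNoTables EpochNo
--   GetToyUTxOByTxIn :: Set ToyTxIn
--                    -> BlockQuery ToyBlock QFLookupTables (Map ToyTxIn ToyTxOut)
--   GetToyWholeUTxO  :: BlockQuery ToyBlock QFTraverseTables (Map ToyTxIn ToyTxOut)
-- @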
+class + -- These instances are not needed for BlockSupportsLedgerQuery but we bundle them here + -- so that we don't need to put them in 'SingleEraBlock' later on + ( +#if __GLASGOW_HASKELL__ <= 902 + forall fp result. Show (BlockQuery blk fp result), +#endif + forall fp. ShowQuery (BlockQuery blk fp) + , SameDepIndex2 (BlockQuery blk) + ) + => BlockSupportsLedgerQuery blk where + + -- | Answer the given query about the extended ledger state, without reading + -- ledger tables from the disk. + answerPureBlockQuery :: + ExtLedgerCfg blk + -> BlockQuery blk QFNoTables result + -> ExtLedgerState blk EmptyMK + -> result + + -- | Answer a query that requires to perform a lookup on the ledger tables. As + -- consensus always runs with a HardForkBlock, this might result in a + -- different code path to answer a query compared to the one that a single + -- block would take, one that is aware of the fact that the ledger tables + -- might be HF ledger tables thus making use of some utilities to make these + -- queries faster. + -- + -- For the hard fork block this will be instantiated to + -- @answerBlockQueryHFOne@. + answerBlockQueryLookup :: + MonadSTM m + => ExtLedgerCfg blk + -> BlockQuery blk QFLookupTables result + -> ReadOnlyForker' m blk + -> m result + + -- | Answer a query that requires to traverse the ledger tables. As consensus + -- always runs with a HardForkBlock, this might result in a different code + -- path to answer a query compared to the one that a single block would take, + -- one that is aware of the fact that the ledger tables might be HF ledger + -- tables thus making use of some utilities to make these queries faster. + -- + -- For the hard fork block this will be instantiated to + -- @answerBlockQueryHFAll@. + answerBlockQueryTraverse :: + MonadSTM m + => ExtLedgerCfg blk + -> BlockQuery blk QFTraverseTables result + -> ReadOnlyForker' m blk + -> m result {------------------------------------------------------------------------------- Queries @@ -68,10 +186,12 @@ queryName query = case query of -- by the result type. -- -- Additions to the set of queries is versioned by 'QueryVersion' +type Query :: Type -> Type -> Type data Query blk result where -- | This constructor is supported by all @QueryVersion@s. The @BlockQuery@ -- argument is versioned by the @BlockNodeToClientVersion blk@. - BlockQuery :: BlockQuery blk result -> Query blk result + BlockQuery :: + SingI footprint => BlockQuery blk footprint result -> Query blk result -- | Get the 'SystemStart' time. -- @@ -88,53 +208,116 @@ data Query blk result where -- Supported by 'QueryVersion' >= 'QueryVersion2'. GetChainPoint :: Query blk (Point blk) +-- | Answer the given query about the extended ledger state. +answerQuery :: + forall blk m result. + (BlockSupportsLedgerQuery blk, ConfigSupportsNode blk, HasAnnTip blk, MonadSTM m) + => ExtLedgerCfg blk + -> ReadOnlyForker' m blk + -> Query blk result + -> m result +answerQuery config forker query = case query of + BlockQuery (blockQuery :: BlockQuery blk footprint result) -> + case sing :: Sing footprint of + SQFNoTables -> + answerPureBlockQuery config blockQuery <$> + atomically (LedgerDB.roforkerGetLedgerState forker) + SQFLookupTables -> + answerBlockQueryLookup config blockQuery forker + SQFTraverseTables -> + answerBlockQueryTraverse config blockQuery forker + GetSystemStart -> + pure $ getSystemStart (topLevelConfigBlock (getExtLedgerCfg config)) + GetChainBlockNo -> + headerStateBlockNo . 
headerState <$> + atomically (LedgerDB.roforkerGetLedgerState forker) + GetChainPoint -> + headerStatePoint . headerState <$> + atomically (LedgerDB.roforkerGetLedgerState forker) + +{------------------------------------------------------------------------------- + Query instances +-------------------------------------------------------------------------------} + +------ +-- Show +------ + +deriving instance + (forall footprint result. Show (BlockQuery blk footprint result)) + => Show (SomeBlockQuery (BlockQuery blk)) + +deriving instance + (forall footprint. Show (BlockQuery blk footprint result)) + => Show (Query blk result) + instance (ShowProxy (BlockQuery blk)) => ShowProxy (Query blk) where - showProxy (Proxy :: Proxy (Query blk)) = "Query (" ++ showProxy (Proxy @(BlockQuery blk)) ++ ")" + showProxy (Proxy :: Proxy (Query blk)) = + "Query (" ++ showProxy (Proxy @(BlockQuery blk)) ++ ")" -instance (ShowQuery (BlockQuery blk), StandardHash blk) => ShowQuery (Query blk) where +instance + (forall footprint. ShowQuery (BlockQuery blk footprint), StandardHash blk) + => ShowQuery (Query blk) where showResult (BlockQuery blockQuery) = showResult blockQuery showResult GetSystemStart = show showResult GetChainBlockNo = show showResult GetChainPoint = show -instance Eq (SomeSecond BlockQuery blk) => Eq (SomeSecond Query blk) where - SomeSecond (BlockQuery blockQueryA) == SomeSecond (BlockQuery blockQueryB) - = SomeSecond blockQueryA == SomeSecond blockQueryB - SomeSecond (BlockQuery _) == _ = False +instance Show (SomeBlockQuery (BlockQuery blk)) => Show (SomeSecond Query blk) where + show (SomeSecond (BlockQuery blockQueryA)) = + "Query " ++ show (SomeBlockQuery blockQueryA) + show (SomeSecond GetSystemStart) = "Query GetSystemStart" + show (SomeSecond GetChainBlockNo) = "Query GetChainBlockNo" + show (SomeSecond GetChainPoint) = "Query GetChainPoint" - SomeSecond GetSystemStart == SomeSecond GetSystemStart = True - SomeSecond GetSystemStart == _ = False +------ +-- Eq +------ - SomeSecond GetChainBlockNo == SomeSecond GetChainBlockNo = True - SomeSecond GetChainBlockNo == _ = False +instance SameDepIndex (Query blk) => Eq (SomeSecond Query blk) where + SomeSecond l == SomeSecond r = isJust $ sameDepIndex l r - SomeSecond GetChainPoint == SomeSecond GetChainPoint = True - SomeSecond GetChainPoint == _ = False +instance SameDepIndex2 query => Eq (SomeBlockQuery query) where + SomeBlockQuery l == SomeBlockQuery r = isJust $ sameDepIndex2 l r -instance Show (SomeSecond BlockQuery blk) => Show (SomeSecond Query blk) where - show (SomeSecond (BlockQuery blockQueryA)) = "Query " ++ show (SomeSecond blockQueryA) - show (SomeSecond GetSystemStart) = "Query GetSystemStart" - show (SomeSecond GetChainBlockNo) = "Query GetChainBlockNo" - show (SomeSecond GetChainPoint) = "Query GetChainPoint" +instance SameDepIndex2 (BlockQuery blk) => SameDepIndex (Query blk) where + sameDepIndex (BlockQuery blockQueryA) (BlockQuery blockQueryB) + = (\Refl -> Refl) <$> sameDepIndex2 blockQueryA blockQueryB + sameDepIndex (BlockQuery _) _ + = Nothing + sameDepIndex GetSystemStart GetSystemStart + = Just Refl + sameDepIndex GetSystemStart _ + = Nothing + sameDepIndex GetChainBlockNo GetChainBlockNo + = Just Refl + sameDepIndex GetChainBlockNo _ + = Nothing + sameDepIndex GetChainPoint GetChainPoint + = Just Refl + sameDepIndex GetChainPoint _ + = Nothing +------ +-- Serialization +------ --- | Exception thrown in the encoders -data QueryEncoderException blk = - -- | A query was submitted that is not supported by the given 
'QueryVersion' - QueryEncoderUnsupportedQuery - (SomeSecond Query blk) - QueryVersion +deriving newtype instance + SerialiseNodeToClient blk ( SomeBlockQuery (query blk)) + => SerialiseNodeToClient blk ((SomeBlockQuery :.: query) blk) + +-- | Exception thrown in the encoders: A query was submitted that is not +-- supported by the given 'QueryVersion' +data QueryEncoderException = forall blk. Show (SomeSecond Query blk) => + QueryEncoderUnsupportedQuery (SomeSecond Query blk) QueryVersion -deriving instance Show (SomeSecond BlockQuery blk) - => Show (QueryEncoderException blk) -instance (Typeable blk, Show (SomeSecond BlockQuery blk)) - => Exception (QueryEncoderException blk) +deriving instance Show QueryEncoderException +instance Show QueryEncoderException => Exception QueryEncoderException queryEncodeNodeToClient :: forall blk. - Typeable blk - => Show (SomeSecond BlockQuery blk) - => SerialiseNodeToClient blk (SomeSecond BlockQuery blk) + SerialiseNodeToClient blk (SomeBlockQuery (BlockQuery blk)) + => Show (SomeSecond Query blk) => CodecConfig blk -> QueryVersion -> BlockNodeToClientVersion blk @@ -174,17 +357,21 @@ queryEncodeNodeToClient codecConfig queryVersion blockVersion (SomeSecond query) then a else throw $ QueryEncoderUnsupportedQuery (SomeSecond query) queryVersion + encodeBlockQuery :: + SingI footprint + => BlockQuery blk footprint result + -> Encoding encodeBlockQuery blockQuery = encodeNodeToClient @blk - @(SomeSecond BlockQuery blk) + @(SomeBlockQuery (BlockQuery blk)) codecConfig blockVersion - (SomeSecond blockQuery) + (SomeBlockQuery blockQuery) queryDecodeNodeToClient :: forall blk. - SerialiseNodeToClient blk (SomeSecond BlockQuery blk) + SerialiseNodeToClient blk (SomeBlockQuery (BlockQuery blk)) => CodecConfig blk -> QueryVersion -> BlockNodeToClientVersion blk @@ -217,18 +404,18 @@ queryDecodeNodeToClient codecConfig queryVersion blockVersion decodeBlockQuery :: Decoder s (SomeSecond Query blk) decodeBlockQuery = do - SomeSecond blockQuery <- decodeNodeToClient + SomeBlockQuery blockQuery <- decodeNodeToClient @blk - @(SomeSecond BlockQuery blk) + @(SomeBlockQuery (BlockQuery blk)) codecConfig blockVersion return (SomeSecond (BlockQuery blockQuery)) -instance ( SerialiseResult blk (BlockQuery blk) +instance ( SerialiseResult' blk BlockQuery , Serialise (HeaderHash blk) - ) => SerialiseResult blk (Query blk) where + ) => SerialiseResult blk Query where encodeResult codecConfig blockVersion (BlockQuery blockQuery) result - = encodeResult codecConfig blockVersion blockQuery result + = encodeResult' codecConfig blockVersion blockQuery result encodeResult _ _ GetSystemStart result = toCBOR result encodeResult _ _ GetChainBlockNo result @@ -237,66 +424,10 @@ instance ( SerialiseResult blk (BlockQuery blk) = encodePoint encode result decodeResult codecConfig blockVersion (BlockQuery query) - = decodeResult codecConfig blockVersion query + = decodeResult' codecConfig blockVersion query decodeResult _ _ GetSystemStart = fromCBOR decodeResult _ _ GetChainBlockNo = fromCBOR decodeResult _ _ GetChainPoint = decodePoint decode - -instance SameDepIndex (BlockQuery blk) => SameDepIndex (Query blk) where - sameDepIndex (BlockQuery blockQueryA) (BlockQuery blockQueryB) - = sameDepIndex blockQueryA blockQueryB - sameDepIndex (BlockQuery _) _ - = Nothing - sameDepIndex GetSystemStart GetSystemStart - = Just Refl - sameDepIndex GetSystemStart _ - = Nothing - sameDepIndex GetChainBlockNo GetChainBlockNo - = Just Refl - sameDepIndex GetChainBlockNo _ - = Nothing - sameDepIndex 
GetChainPoint GetChainPoint - = Just Refl - sameDepIndex GetChainPoint _ - = Nothing - -deriving instance Show (BlockQuery blk result) => Show (Query blk result) - --- | Answer the given query about the extended ledger state. -answerQuery :: - (BlockSupportsLedgerQuery blk, ConfigSupportsNode blk, HasAnnTip blk) - => ExtLedgerCfg blk - -> Query blk result - -> ExtLedgerState blk - -> result -answerQuery cfg query st = case query of - BlockQuery blockQuery -> answerBlockQuery cfg blockQuery st - GetSystemStart -> getSystemStart (topLevelConfigBlock (getExtLedgerCfg cfg)) - GetChainBlockNo -> headerStateBlockNo (headerState st) - GetChainPoint -> headerStatePoint (headerState st) - --- | Different queries supported by the ledger, indexed by the result type. -data family BlockQuery blk :: Type -> Type - --- | Query the ledger extended state. --- --- Used by the LocalStateQuery protocol to allow clients to query the extended --- ledger state. -class (ShowQuery (BlockQuery blk), SameDepIndex (BlockQuery blk)) - => BlockSupportsLedgerQuery blk where - - -- | Answer the given query about the extended ledger state. - answerBlockQuery :: - ExtLedgerCfg blk - -> BlockQuery blk result - -> ExtLedgerState blk - -> result - -instance SameDepIndex (BlockQuery blk) => Eq (SomeSecond BlockQuery blk) where - SomeSecond qry == SomeSecond qry' = isJust (sameDepIndex qry qry') - -deriving instance (forall result. Show (BlockQuery blk result)) - => Show (SomeSecond BlockQuery blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs index 605c66bf00..27743fc699 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs @@ -1,8 +1,10 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE DataKinds #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeApplications #-} {-# LANGUAGE TypeFamilies #-} @@ -16,7 +18,9 @@ module Ouroboros.Consensus.Ledger.SupportsMempool ( , HasTxId (..) , HasTxs (..) , IgnoringOverflow (..) + , Invalidated (..) , LedgerSupportsMempool (..) + , ReapplyTxsResult (..) , TxId , TxLimits (..) , Validated @@ -29,6 +33,9 @@ import Control.Monad.Except import Data.ByteString.Short (ShortByteString) import Data.Coerce (coerce) import Data.DerivingVia (InstantiatedAt (..)) +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable +#endif import Data.Kind (Type) import Data.Measure (Measure) import qualified Data.Measure @@ -37,18 +44,20 @@ import GHC.Stack (HasCallStack) import NoThunks.Class import Ouroboros.Consensus.Block.Abstract import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ticked +import Ouroboros.Consensus.Ledger.Tables.Utils -- | Generalized transaction -- -- The mempool (and, accordingly, blocks) consist of "generalized -- transactions"; this could be "proper" transactions (transferring funds) but -- also other kinds of things such as update proposals, delegations, etc. 
-data family GenTx blk :: Type +type GenTx :: Type -> Type +data family GenTx blk -- | Updating the ledger with a single transaction may result in a different -- error type as when updating it with a block -type family ApplyTxErr blk :: Type +type ApplyTxErr :: Type -> Type +type family ApplyTxErr blk -- | A flag indicating whether the mempool should reject a valid-but-problematic -- transaction, in order to to protect its author from penalties etc @@ -77,7 +86,6 @@ class ( UpdateLedger blk , TxLimits blk , NoThunks (GenTx blk) , NoThunks (Validated (GenTx blk)) - , NoThunks (Ticked (LedgerState blk)) , Show (GenTx blk) , Show (Validated (GenTx blk)) , Show (ApplyTxErr blk) @@ -96,8 +104,8 @@ class ( UpdateLedger blk -> WhetherToIntervene -> SlotNo -- ^ Slot number of the block containing the tx -> GenTx blk - -> TickedLedgerState blk - -> Except (ApplyTxErr blk) (TickedLedgerState blk, Validated (GenTx blk)) + -> TickedLedgerState blk ValuesMK + -> Except (ApplyTxErr blk) (TickedLedgerState blk DiffMK, Validated (GenTx blk)) -- | Apply a previously validated transaction to a potentially different -- ledger state @@ -109,14 +117,64 @@ class ( UpdateLedger blk => LedgerConfig blk -> SlotNo -- ^ Slot number of the block containing the tx -> Validated (GenTx blk) - -> TickedLedgerState blk - -> Except (ApplyTxErr blk) (TickedLedgerState blk) + -> TickedLedgerState blk ValuesMK + -> Except (ApplyTxErr blk) (TickedLedgerState blk ValuesMK) + + -- | Apply a list of previously validated transactions to a new ledger state. + -- + -- It is never the case that we reapply one single transaction, we always + -- reapply a list of transactions (and even one transaction can just be lifted + -- into the unary list). + -- + -- When reapplying a list of transactions, in the hard-fork instance we want + -- to first project everything into the particular block instance and then we + -- can inject/project the ledger tables only once. For single era blocks, this + -- is by default implemented as a fold using 'reapplyTx'. + -- + -- Notice: It is crucial that the list of validated transactions returned is + -- in the same order as they were given, as we will use those later on to + -- filter a list of 'TxTicket's. + reapplyTxs :: + HasCallStack + => LedgerConfig blk + -> SlotNo -- ^ Slot number of the block containing the tx + -> [Validated (GenTx blk)] + -> TickedLedgerState blk ValuesMK + -> ReapplyTxsResult blk + reapplyTxs cfg slot txs st = + (\(err, val, st') -> + ReapplyTxsResult + err + (reverse val) + (forgetTrackingValues . calculateDifference st $ st') + ) + $ foldl' (\(accE, accV, st') tx -> + case runExcept (reapplyTx cfg slot tx st') of + Left err -> (Invalidated tx err : accE, accV, st') + Right st'' -> (accE, tx : accV, st'') + ) ([], [], st) txs -- | Discard the evidence that transaction has been previously validated txForgetValidated :: Validated (GenTx blk) -> GenTx blk + -- | Given a transaction, get the key-sets that we need to apply it to a + -- ledger state. + getTransactionKeySets :: GenTx blk -> LedgerTables (LedgerState blk) KeysMK + +data ReapplyTxsResult blk = + ReapplyTxsResult { + -- | txs that are now invalid. Order doesn't matter + invalidatedTxs :: ![Invalidated blk] + -- | txs that are valid again, order must be the same as the order in + -- which txs were received + , validatedTxs :: ![Validated (GenTx blk)] + -- | Resulting ledger state + , resultingState :: !(TickedLedgerState blk DiffMK) + } + -- | A generalized transaction, 'GenTx', identifier. 
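-- [Editorial sketch, not part of this patch] A mempool revalidation step in
-- terms of the API above: the caller first collects the keys needed by the
-- transactions (via 'getTransactionKeySets' on the underlying 'GenTx's), reads
-- the corresponding values into a @TickedLedgerState blk ValuesMK@ (e.g.
-- through a forker, not shown here), and then reapplies the whole batch at
-- once with 'reapplyTxs':
--
-- @
-- revalidateSketch ::
--      LedgerSupportsMempool blk
--   => LedgerConfig blk
--   -> SlotNo
--   -> [Validated (GenTx blk)]
--   -> TickedLedgerState blk ValuesMK
--   -> ([Invalidated blk], [Validated (GenTx blk)], TickedLedgerState blk DiffMK)
-- revalidateSketch cfg slot txs st =
--     (invalidatedTxs res, validatedTxs res, resultingState res)
--   where
--     res = reapplyTxs cfg slot txs st
-- @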
-data family TxId tx :: Type +type TxId :: Type -> Type +data family TxId blk -- | Transactions with an identifier -- @@ -212,7 +270,7 @@ class ( Measure (TxMeasure blk) txMeasure :: LedgerConfig blk -- ^ used at least by HFC's composition logic - -> TickedLedgerState blk + -> TickedLedgerState blk ValuesMK -> GenTx blk -> Except (ApplyTxErr blk) (TxMeasure blk) @@ -220,7 +278,7 @@ class ( Measure (TxMeasure blk) blockCapacityTxMeasure :: LedgerConfig blk -- ^ at least for symmetry with 'txMeasure' - -> TickedLedgerState blk + -> TickedLedgerState blk mk -> TxMeasure blk -- | We intentionally do not declare a 'Num' instance! We prefer @ByteSize32@ @@ -284,3 +342,9 @@ class HasByteSize a where instance HasByteSize ByteSize32 where txMeasureByteSize = id + +-- | A transaction that was previously valid. Used to clarify the types on the +-- 'reapplyTxs' function. +data Invalidated blk = Invalidated { getInvalidated :: Validated (GenTx blk) + , getReason :: ApplyTxErr blk + } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsPeerSelection.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsPeerSelection.hs index be352e9f49..f94fd1c730 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsPeerSelection.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsPeerSelection.hs @@ -46,4 +46,4 @@ class LedgerSupportsPeerSelection blk where -- -- Note: if the ledger state is old, the registered relays can also be old and -- may no longer be online. - getPeers :: LedgerState blk -> [(PoolStake, NonEmpty StakePoolRelay)] + getPeers :: LedgerState blk mk -> [(PoolStake, NonEmpty StakePoolRelay)] diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsProtocol.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsProtocol.hs index 90939ac31c..55ea0f09f2 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsProtocol.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsProtocol.hs @@ -13,7 +13,9 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.Forecast import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Tables.Utils (forgetLedgerTables) import Ouroboros.Consensus.Protocol.Abstract +import Ouroboros.Consensus.Ticked -- | Link protocol to ledger class ( BlockSupportsProtocol blk @@ -25,7 +27,7 @@ class ( BlockSupportsProtocol blk -- See 'ledgerViewForecastAt' for a discussion and precise definition of the -- relation between this and forecasting. protocolLedgerView :: LedgerConfig blk - -> Ticked (LedgerState blk) + -> Ticked1 (LedgerState blk) mk -> LedgerView (BlockProtocol blk) -- | Get a forecast at the given ledger state. 
@@ -66,7 +68,7 @@ class ( BlockSupportsProtocol blk ledgerViewForecastAt :: HasCallStack => LedgerConfig blk - -> LedgerState blk + -> LedgerState blk mk -> Forecast (LedgerView (BlockProtocol blk)) -- | Relation between 'ledgerViewForecastAt' and 'applyChainTick' @@ -75,7 +77,7 @@ _lemma_ledgerViewForecastAt_applyChainTick , Eq (LedgerView (BlockProtocol blk)) ) => LedgerConfig blk - -> LedgerState blk + -> LedgerState blk mk -> Forecast (LedgerView (BlockProtocol blk)) -> SlotNo -> Either String () @@ -84,6 +86,7 @@ _lemma_ledgerViewForecastAt_applyChainTick cfg st forecast for , let lhs = forecastFor forecast for rhs = protocolLedgerView cfg . applyChainTick cfg for + . forgetLedgerTables $ st , Right lhs' <- runExcept lhs , lhs' /= rhs diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables.hs new file mode 100644 index 0000000000..e922d82278 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables.hs @@ -0,0 +1,388 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DefaultSignatures #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | This module defines the 'LedgerTables', a portion of the Ledger notion of a +-- /ledger state/ (not to confuse with our +-- 'Ouroboros.Consensus.Ledger.Basics.LedgerState') that together with it, +-- conforms a complete Ledger /ledger state/. +-- +-- 'LedgerTables' are parametrized by two types: keys and values. For now, their +-- only current instantiation is to hold the UTxO set, but future features will +-- extend this to hold other parts of the ledger state that now live in memory. +-- However, 'LedgerTables' don't necessarily have to contain maps from keys to +-- values, and the particular instantiation might choose to ignore some of those +-- types (as phantom types). See 'KeysMK' for an example. +-- +-- This type is used for two main purposes. Firstly, we use ledger tables to +-- /extract/ data from the /ledger state/ and store it on secondary storage (eg +-- a solid-state hard-drive). Secondly, when we load data from disk onto memory, +-- we use ledger tables to /inject/ data into the /ledger state/. This mechanism +-- allows us to keep most of the data on disk, which is rarely used, reducing +-- the memory usage of the Consensus layer. Ledger tables are used in the +-- 'Ouroboros.Consensus.Storage.LedgerDB.BackingStore' and +-- 'Ouroboros.Consensus.Storage.LedgerDB.DbChangelog' modules. +-- +-- = __Example__ +-- +-- As an example, consider a LedgerState that contains a Ledger /ledger state/ +-- (such as the @NewEpochState@) and a UTxO set: +-- +-- @ +-- data instance t'Ouroboros.Consensus.Ledger.Basics.LedgerState' (Block era) mk = LedgerState { +-- theLedgerLedgerState :: NewEpochState era +-- , theTables :: 'LedgerTables' (Block era) mk +-- } +-- @ +-- +-- The Ledger /ledger state/ contains a UTxO set as well, and with +-- @stowLedgerTables@ and @unstowLedgerTables@ we move those between the Ledger +-- /ledger state/ and the 'LedgerTables', for example: +-- +-- @ +-- 'unstowLedgerTables' (LedgerState { +-- theLedgerLedgerState = NewEpochState { +-- ... +-- , utxoSet = Map.fromList [(\'a\', 100), (\'b\', 100), ...] 
+-- } +-- , theTables = 'EmptyMK' +-- }) +-- == +-- LedgerState { +-- theLedgerLedgerState = NewEpochState { +-- ... +-- , utxoSet = Map.empty +-- } +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'b\', 100), ...]) +-- }) +-- @ +-- +-- @ +-- 'stowLedgerTables' (LedgerState { +-- theLedgerLedgerState = NewEpochState { +-- ... +-- , utxoSet = Map.empty +-- } +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'b\', 100), ...]) +-- }) +-- == +-- LedgerState { +-- theLedgerLedgerState = NewEpochState { +-- ... +-- , utxoSet = Map.fromList [(\'a\', 100), (\'b\', 100), ...] +-- } +-- , theTables = 'EmptyMK' +-- }) +-- @ +-- +-- Using these functions we can extract the data from the Ledger /ledger state/ +-- for us Consensus to manipulate, and we can then inject it back so that we +-- provide the expected data to the ledger. Note that the Ledger rules for +-- applying a block are defined in a way that it only needs the subset of the +-- UTxO set that the block being applied will consume. See [the @DbChangelog@ +-- documentation for block +-- application](Ouroboros-Consensus-Storage-LedgerDB-DbChangelog.html#g:applying). +-- +-- Now using 'Ouroboros.Consensus.Ledger.Tables.Utils.calculateDifference', we +-- can compare two (successive) t'Ouroboros.Consensus.Ledger.Basics.LedgerState's +-- to produce differences: +-- +-- @ +-- 'Ouroboros.Consensus.Ledger.Tables.Utils.calculateDifference' +-- (LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'b\', 100)]) +-- }) +-- (LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'c\', 200)]) +-- }) +-- == +-- 'TrackingMK' +-- (Map.fromList [(\'a\', 100), (\'c\', 200)]) +-- (Map.fromList [(\'b\', [Delete]), (\'c\', [Insert 200])]) +-- @ +-- +-- This operation provided a 'TrackingMK' which is in fact just a 'ValuesMK' and +-- 'DiffMK' put together. +-- +-- We can then use those differences to /forward/ a set of values, so for +-- example (taking the example above): +-- +-- @ +-- let state1 = LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'b\', 100)]) +-- } +-- state2 = LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList [(\'a\', 100), (\'c\', 200)]) +-- } +-- state3 = LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList []) +-- } +-- in +-- 'Ouroboros.Consensus.Ledger.Tables.Utils.applyDiffs' state3 ('Ouroboros.Consensus.Ledger.Tables.Utils.forgetTrackingValues' $ 'Ouroboros.Consensus.Ledger.Tables.Utils.calculateDifference' state1 state2) +-- == +-- LedgerState { +-- ... +-- , theTables = 'ValuesMK' (Map.fromList [(\'c\', 200)]) +-- } +-- @ +-- +-- Notice that we produced differences for @\'b\'@ and @\'c\'@, but as the input +-- state (@state3@) didn't contain @\'b\'@ the only difference that was applied +-- was the one of @\'c\'@. +-- +-- Also when applying a block that contains some transactions, we can produce +-- 'LedgerTable's of @KeysMK@, by gathering the txins required by the +-- transactions: +-- +-- @ +-- 'Ouroboros.Consensus.Ledger.Abstract.getBlockKeySets' (Block {..., txs = [Tx { input = [\'a\', \'b\'], outputs = [\'c\', \'d\'] }]}) +-- == 'KeysMK' (Set.fromList [\'a\', \'b\']) +-- @ +-- +-- We shall use those later on to read the txouts from some storage (which will +-- be the 'Ouroboros.Consensus.Storage.LedgerDB.BackingStore.BackingStore') and +-- forward the resulting txouts through a sequence of differences (which will be +-- 'Ouroboros.Consensus.Storage.LedgerDB.DbChangelog.adcDiffs'). 
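-- The same forwarding idea can be illustrated with plain @Data.Map@s, using
-- @diff@ and @applyDiff@ from "Ouroboros.Consensus.Ledger.Tables.Diff" (also
-- added in this patch). This is an editorial sketch; it assumes
-- @applyDiff :: Map k v -> Diff k v -> Map k v@, as exported from that module:
--
-- @
-- let before = Map.fromList [(\'a\', 100), (\'b\', 100)]
--     after  = Map.fromList [(\'a\', 100), (\'c\', 200)]
--     d      = diff before after        -- Delete for \'b\', Insert 200 for \'c\'
-- in applyDiff Map.empty d
-- == Map.fromList [(\'c\', 200)]
-- @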
+-- +-- This example already covered most of the standard mapkinds, in particular: +-- +-- ['EmptyMK']: A nullary data constructor, an empty table. +-- +-- ['ValuesMK']: Contains a @Data.Map@ from txin to txouts. +-- +-- ['DiffMK']: Contains a @Data.Map@ from txin to history of changes (see +-- "Data.Map.Diff.Strict"). +-- +-- ['TrackingMK']: Contains both a 'ValuesMK' and 'DiffMK'. +-- +-- ['KeysMK']: Contains a @Data.Set@ of txins. +-- +-- ['SeqDiffMK']: A fingertree of 'DiffMK's. +module Ouroboros.Consensus.Ledger.Tables ( + -- * Core + module Ouroboros.Consensus.Ledger.Tables.Basics + , module Ouroboros.Consensus.Ledger.Tables.MapKind + -- * Utilities + , module Ouroboros.Consensus.Ledger.Tables.Combinators + -- * Basic LedgerState classes + , CanStowLedgerTables (..) + , HasLedgerTables (..) + , HasTickedLedgerTables + -- * Serialization + , CanSerializeLedgerTables + , codecLedgerTables + , valuesMKDecoder + , valuesMKEncoder + -- * Special classes + , LedgerTablesAreTrivial + , convertMapKind + , trivialLedgerTables + ) where + +import Cardano.Binary (FromCBOR (fromCBOR), ToCBOR (toCBOR)) +import qualified Codec.CBOR.Decoding as CBOR +import qualified Codec.CBOR.Encoding as CBOR +import qualified Control.Exception as Exn +import Control.Monad (replicateM) +import Data.Kind (Constraint) +import qualified Data.Map.Strict as Map +import Data.Void (Void) +import NoThunks.Class (NoThunks (..)) +import Ouroboros.Consensus.Ledger.Tables.Basics +import Ouroboros.Consensus.Ledger.Tables.Combinators +import Ouroboros.Consensus.Ledger.Tables.MapKind +import Ouroboros.Consensus.Ticked + +{------------------------------------------------------------------------------- + Basic LedgerState classes +-------------------------------------------------------------------------------} + +-- | Extracting @'LedgerTables'@ from @l mk@ (which will share the same @mk@), +-- or replacing the @'LedgerTables'@ associated to a particular @l@. +type HasLedgerTables :: LedgerStateKind -> Constraint +class ( Ord (Key l) + , Eq (Value l) + , Show (Key l) + , Show (Value l) + , NoThunks (Key l) + , NoThunks (Value l) + ) => HasLedgerTables l where + + -- | Extract the ledger tables from a ledger state + -- + -- The constraints on @mk@ are necessary because the 'CardanoBlock' instance + -- uses them. + projectLedgerTables :: + (CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => l mk + -> LedgerTables l mk + default projectLedgerTables :: + (ZeroableMK mk, LedgerTablesAreTrivial l) + => l mk + -> LedgerTables l mk + projectLedgerTables _ = trivialLedgerTables + + -- | Overwrite the tables in the given ledger state. + -- + -- The contents of the tables should not be /younger/ than the content of the + -- ledger state. In particular, for a + -- 'Ouroboros.Consensus.HardFork.Combinator.Basics.HardForkBlock' ledger, the + -- tables argument should not contain any data from eras that succeed the + -- current era of the ledger state argument. + -- + -- The constraints on @mk@ are necessary because the 'CardanoBlock' instance + -- uses them. 
+ withLedgerTables :: + (CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => l any + -> LedgerTables l mk + -> l mk + default withLedgerTables :: + LedgerTablesAreTrivial l + => l any + -> LedgerTables l mk + -> l mk + withLedgerTables st _ = convertMapKind st + +instance ( Ord (Key l) + , Eq (Value l) + , Show (Key l) + , Show (Value l) + , NoThunks (Key l) + , NoThunks (Value l) + ) => HasLedgerTables (LedgerTables l) where + projectLedgerTables = castLedgerTables + withLedgerTables _ = castLedgerTables + +-- | Convenience class, useful for partially applying the composition of +-- 'HasLedgerTables' and 'Ticked1'. +type HasTickedLedgerTables :: LedgerStateKind -> Constraint +class HasLedgerTables (Ticked1 l) => HasTickedLedgerTables l where +instance HasLedgerTables (Ticked1 l) => HasTickedLedgerTables l + +-- | LedgerTables are projections of data from a LedgerState and as such they +-- can be injected back into a LedgerState. This is necessary because the Ledger +-- rules are unaware of UTxO-HD changes. Thus, by stowing the ledger tables, we are +-- able to provide a Ledger State with a restricted UTxO set that is enough to +-- execute the Ledger rules. +-- +-- In particular, HardForkBlock LedgerStates are never given diretly to the +-- ledger but rather unwrapped and then it is the inner ledger state the one we +-- give to the ledger. This means that all the single era blocks must be an +-- instance of this class, but HardForkBlocks might avoid doing so. +type CanStowLedgerTables :: LedgerStateKind -> Constraint +class CanStowLedgerTables l where + + stowLedgerTables :: l ValuesMK -> l EmptyMK + default stowLedgerTables :: + (LedgerTablesAreTrivial l) + => l ValuesMK + -> l EmptyMK + stowLedgerTables = convertMapKind + + unstowLedgerTables :: l EmptyMK -> l ValuesMK + default unstowLedgerTables :: + (LedgerTablesAreTrivial l) + => l EmptyMK + -> l ValuesMK + unstowLedgerTables = convertMapKind + +{------------------------------------------------------------------------------- + Serialization Codecs +-------------------------------------------------------------------------------} + +-- | This class provides a 'CodecMK' that can be used to encode/decode keys and +-- values on @'LedgerTables' l mk@ +-- +-- TODO: can this be removed in favour of EncodeDisk and DecodeDisk? +type CanSerializeLedgerTables :: LedgerStateKind -> Constraint +class CanSerializeLedgerTables l where + codecLedgerTables :: LedgerTables l CodecMK + default codecLedgerTables :: + ( FromCBOR (Key l), FromCBOR (Value l) + , ToCBOR (Key l), ToCBOR (Value l) + ) + => LedgerTables l CodecMK + codecLedgerTables = LedgerTables $ CodecMK toCBOR toCBOR fromCBOR fromCBOR + +-- | Default encoder of @'LedgerTables' l ''ValuesMK'@ to be used by the +-- in-memory backing store. +valuesMKEncoder :: + ( HasLedgerTables l + , CanSerializeLedgerTables l + ) + => LedgerTables l ValuesMK + -> CBOR.Encoding +valuesMKEncoder tables = + CBOR.encodeListLen (ltcollapse $ ltmap (K2 . const 1) tables) + <> ltcollapse (ltliftA2 (K2 .: go) codecLedgerTables tables) + where + go :: CodecMK k v -> ValuesMK k v -> CBOR.Encoding + go (CodecMK encK encV _decK _decV) (ValuesMK m) = + CBOR.encodeMapLen (fromIntegral $ Map.size m) + <> Map.foldMapWithKey (\k v -> encK k <> encV v) m + +-- | Default decoder of @'LedgerTables' l ''ValuesMK'@ to be used by the +-- in-memory backing store. 
+valuesMKDecoder :: + ( HasLedgerTables l + , CanSerializeLedgerTables l + ) + => CBOR.Decoder s (LedgerTables l ValuesMK) +valuesMKDecoder = do + numTables <- CBOR.decodeListLen + if numTables == 0 + then + return $ ltpure emptyMK + else do + mapLen <- CBOR.decodeMapLen + ret <- lttraverse (go mapLen) codecLedgerTables + Exn.assert (ltcollapse (ltmap (K2 . const 1) ret) == numTables) + $ return ret + where + go :: Ord k + => Int + -> CodecMK k v + -> CBOR.Decoder s (ValuesMK k v) + go len (CodecMK _encK _encV decK decV) = + ValuesMK . Map.fromList + <$> replicateM len (do + !k <- decK + !v <- decV + pure (k, v)) + +{------------------------------------------------------------------------------- + Special classes of ledger states +-------------------------------------------------------------------------------} + +-- | For some ledger states we won't be defining 'LedgerTables' and instead the +-- ledger state will be fully stored in memory, as before UTxO-HD. The ledger +-- states that are defined this way can be made instances of this class which +-- allows for easy manipulation of the types of @mk@ required at any step of the +-- program. +type LedgerTablesAreTrivial :: LedgerStateKind -> Constraint +class (Key l ~ Void, Value l ~ Void) => LedgerTablesAreTrivial l where + -- | If the ledger state is always in memory, then @l mk@ will be isomorphic + -- to @l mk'@ for all @mk@, @mk'@. As a result, we can convert between ledgers + -- states indexed by different map kinds. + -- + -- This function is useful to combine functions that operate on functions that + -- transform the map kind on a ledger state (eg @applyChainTickLedgerResult@). + convertMapKind :: l mk -> l mk' + +trivialLedgerTables :: + (ZeroableMK mk, LedgerTablesAreTrivial l) + => LedgerTables l mk +trivialLedgerTables = LedgerTables emptyMK diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Basics.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Basics.hs new file mode 100644 index 0000000000..6c348d142a --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Basics.hs @@ -0,0 +1,104 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +module Ouroboros.Consensus.Ledger.Tables.Basics ( + -- * Kinds + -- + -- | For convenience' sake, we define these kinds which convey the intended + -- instantiation for the type variables. + LedgerStateKind + , MapKind + -- * Ledger tables + , Castable + , Key + , LedgerTables (..) + , Value + , castLedgerTables + ) where + +import Data.Coerce (coerce) +import Data.Kind (Type) +import GHC.Generics (Generic) +import NoThunks.Class (NoThunks) +import Ouroboros.Consensus.Ticked (Ticked1) + +{------------------------------------------------------------------------------- + Kinds +-------------------------------------------------------------------------------} + +-- | Something that holds two types, which intend to represent /keys/ and +-- /values/. 
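-- [Editorial sketch, not part of this patch] For a UTxO-only ledger state of a
-- hypothetical @ToyBlock@, the 'Key' and 'Value' families defined later in
-- this module would be instantiated as:
--
-- @
-- type instance Key   (LedgerState ToyBlock) = ToyTxIn
-- type instance Value (LedgerState ToyBlock) = ToyTxOut
-- @
--
-- so that @'LedgerTables' (LedgerState ToyBlock) ValuesMK@ is a newtype around
-- a single @Map ToyTxIn ToyTxOut@.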
+type MapKind = {- key -} Type -> {- value -} Type -> Type +type LedgerStateKind = MapKind -> Type + +{------------------------------------------------------------------------------- + Ledger tables +-------------------------------------------------------------------------------} + +-- | The Ledger Tables represent the portion of the data on disk that has been +-- pulled from disk and attached to the in-memory Ledger State or that will +-- eventually be written to disk. +-- +-- With UTxO-HD and the split of the Ledger /ledger state/ into the in-memory +-- part and the on-disk part, this splitting was reflected in the new type +-- parameter added to the (Consensus) +-- 'Ouroboros.Consensus.Ledger.Basics.LedgerState', to which we refer as "the +-- MapKind" or @mk@. +-- +-- Every 'Ouroboros.Consensus.Ledger.Basics.LedgerState' (or @LedgerState@-like +-- type, such as the 'Ouroboros.Consensus.Ledger.Extended.ExtLedgerState') is +-- associated with a 'LedgerTables' and they both share the @mk@. They both are +-- of kind 'LedgerStateKind'. 'LedgerTables' is just a way to refer /only/ to a +-- partial view of the on-disk data without having the rest of the in-memory +-- 'LedgerState' in scope. +-- +-- The @mk@ can be instantiated to anything that is map-like, i.e. that expects +-- two type parameters, the key and the value. +type LedgerTables :: LedgerStateKind -> MapKind -> Type +newtype LedgerTables l mk = LedgerTables { + getLedgerTables :: mk (Key l) (Value l) + } + deriving stock Generic + +deriving stock instance Show (mk (Key l) (Value l)) + => Show (LedgerTables l mk) +deriving stock instance Eq (mk (Key l) (Value l)) + => Eq (LedgerTables l mk) +deriving newtype instance NoThunks (mk (Key l) (Value l)) + => NoThunks (LedgerTables l mk) + +-- | Each @LedgerState@ instance will have the notion of a @Key@ for the tables. +-- For instance, if we only pulled out only the UTxO set from the ledger state, +-- this type would be @TxIn@. See +-- "Ouroboros.Consensus.HardFork.Combinator.Ledger". +type Key :: LedgerStateKind -> Type +type family Key l -- TODO: rename to TxIn + +-- | Each @LedgerState@ instance will have the notion of a @Value@ for the +-- tables. For instance, if we only pulled out only the UTxO set from the ledger +-- state, this type would be @TxOut@ or @NS TxOut@. +type Value :: LedgerStateKind -> Type +type family Value l -- TODO: rename to TxOut + +type instance Key (LedgerTables l) = Key l +type instance Value (LedgerTables l) = Value l +type instance Key (Ticked1 l) = Key l +type instance Value (Ticked1 l) = Value l + +type Castable l l' = (Key l ~ Key l', Value l ~ Value l') + +castLedgerTables :: + forall l' l mk. 
Castable l l' + => LedgerTables l mk + -> LedgerTables l' mk +castLedgerTables = coerce + diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Combinators.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Combinators.hs new file mode 100644 index 0000000000..4276436862 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Combinators.hs @@ -0,0 +1,277 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +-- | Ledger tables are barbie-types (see @barbies@ package), though unfortunately +-- we can not implement classes like 'FunctorB' for ledger tables because the +-- class expects a type that is indexed over a /(uni-)functor/. Ledger tables +-- are indexed over /bifunctors/ (mapkinds), so the kinds do not match. To cut +-- on boilerplate, we do not define variants of 'FunctorB' (and similar classes) +-- for types that are indexed over bifunctors. Instead, we define specialised +-- variants of class functions and utility functions. For example: +-- +-- * 'ltmap' instead of 'bmap' or 'bmapC' +-- +-- * 'lttraverse' instead of 'btraverse' or 'btraverseC' +-- +-- * 'ltsequence' instead of 'bsequence'. +-- +-- TODO: if we make mapkinds of kind @(k1, k2) -> Type@ instead of @k1 -> k2 -> +-- Type@, then we could reuse most of the @barbies@ machinery. +module Ouroboros.Consensus.Ledger.Tables.Combinators ( + -- * Common constraints + LedgerTableConstraints + -- * Functor + , ltmap + -- * Traversable + , lttraverse + -- ** Utility functions + , ltsequence + -- * Applicative + , ltprod + , ltpure + -- ** Utility functions + , ltap + , ltliftA + , ltliftA2 + , ltliftA3 + , ltliftA4 + -- * Applicative and Traversable + , ltzipWith3A + -- * Collapsing + , ltcollapse + -- * Lifted functions + , fn2_1 + , fn2_2 + , fn2_3 + , fn2_4 + , type (-..->) (..) + -- ** Re-exports of utils + , (...:) + , (..:) + , (.:) + -- * Basic bifunctors + , K2 (..) + , type (:..:) (..) + ) where + +import Data.Bifunctor +import Data.Kind +import Data.SOP.Functors +import Ouroboros.Consensus.Ledger.Tables.Basics +import Ouroboros.Consensus.Util ((...:), (..:), (.:)) + +{------------------------------------------------------------------------------- + Common constraints +-------------------------------------------------------------------------------} + +type LedgerTableConstraints l = (Ord (Key l), Eq (Value l)) + +{------------------------------------------------------------------------------- + Functor +-------------------------------------------------------------------------------} + +-- | Like 'bmap', but for ledger tables. +ltmap :: + LedgerTableConstraints l + => (forall k v. (Ord k, Eq v) => mk1 k v -> mk2 k v) + -> LedgerTables l mk1 + -> LedgerTables l mk2 +ltmap f (LedgerTables x) = LedgerTables $ f x + +{------------------------------------------------------------------------------- + Traversable +-------------------------------------------------------------------------------} + +-- | Like 'btraverse', but for ledger tables. 
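-- [Editorial sketch, not part of this patch] As an example of these
-- combinators, 'ltmap' can project the keys out of a table of values
-- (assuming the @ValuesMK@ and @KeysMK@ constructors from
-- "Ouroboros.Consensus.Ledger.Tables.MapKind" and a qualified @Data.Map@
-- import are in scope):
--
-- @
-- valuesToKeys ::
--      LedgerTableConstraints l
--   => LedgerTables l ValuesMK -> LedgerTables l KeysMK
-- valuesToKeys = ltmap toKeys
--   where toKeys (ValuesMK vs) = KeysMK (Map.keysSet vs)
-- @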
+lttraverse :: + (Applicative f, LedgerTableConstraints l) + => (forall k v. (Ord k, Eq v) => mk1 k v -> f (mk2 k v)) + -> LedgerTables l mk1 + -> f (LedgerTables l mk2) +lttraverse f (LedgerTables x) = LedgerTables <$> f x + +-- +-- Utility functions +-- + +ltsequence :: + (Applicative f, LedgerTableConstraints l) + => LedgerTables l (f :..: mk) + -> f (LedgerTables l mk) +ltsequence = lttraverse unComp2 + +{------------------------------------------------------------------------------- + Applicative +-------------------------------------------------------------------------------} + +-- | Like 'bpure', but for ledger tables. +ltpure :: + LedgerTableConstraints l + => (forall k v. (Ord k, Eq v) => mk k v) + -> LedgerTables l mk +ltpure = LedgerTables + +-- | Like 'bprod', but for ledger tables. +ltprod :: LedgerTables l f -> LedgerTables l g -> LedgerTables l (f `Product2` g) +ltprod (LedgerTables x) (LedgerTables y) = LedgerTables (Pair2 x y) + +-- +-- Utility functions +-- + +ltap :: + LedgerTableConstraints l + => LedgerTables l (mk1 -..-> mk2) + -> LedgerTables l mk1 + -> LedgerTables l mk2 +ltap f x = ltmap g $ ltprod f x + where g (Pair2 f' x') = apFn2 f' x' + +ltliftA :: + LedgerTableConstraints l + => (forall k v. (Ord k, Eq v) => mk1 k v -> mk2 k v) + -> LedgerTables l mk1 + -> LedgerTables l mk2 +ltliftA f x = ltpure (fn2_1 f) `ltap` x + +ltliftA2 :: + LedgerTableConstraints l + => (forall k v. (Ord k, Eq v) => mk1 k v -> mk2 k v -> mk3 k v) + -> LedgerTables l mk1 + -> LedgerTables l mk2 + -> LedgerTables l mk3 +ltliftA2 f x x' = ltpure (fn2_2 f) `ltap` x `ltap` x' + +ltliftA3 :: + LedgerTableConstraints l + => (forall k v. (Ord k, Eq v) => mk1 k v -> mk2 k v -> mk3 k v -> mk4 k v) + -> LedgerTables l mk1 + -> LedgerTables l mk2 + -> LedgerTables l mk3 + -> LedgerTables l mk4 +ltliftA3 f x x' x'' = ltpure (fn2_3 f) `ltap` x `ltap` x' `ltap` x'' + +ltliftA4 :: + LedgerTableConstraints l + => ( forall k v. (Ord k, Eq v) + => mk1 k v -> mk2 k v -> mk3 k v -> mk4 k v -> mk5 k v + ) + -> LedgerTables l mk1 + -> LedgerTables l mk2 + -> LedgerTables l mk3 + -> LedgerTables l mk4 + -> LedgerTables l mk5 +ltliftA4 f x x' x'' x''' = + ltpure (fn2_4 f) `ltap` x `ltap` x' `ltap` x'' `ltap` x''' + +{------------------------------------------------------------------------------- + Applicative and Traversable +-------------------------------------------------------------------------------} + +ltzipWith3A :: + (Applicative f, LedgerTableConstraints l) + => (forall k v. (Ord k, Eq v) => mk1 k v -> mk2 k v -> mk3 k v -> f (mk4 k v)) + -> LedgerTables l mk1 + -> LedgerTables l mk2 + -> LedgerTables l mk3 + -> f (LedgerTables l mk4) +ltzipWith3A f = ltsequence ..: ltliftA3 (Comp2 ..: f) + +{------------------------------------------------------------------------------- + Collapsing +-------------------------------------------------------------------------------} + +ltcollapse :: LedgerTables l (K2 a) -> a +ltcollapse = unK2 . getLedgerTables + +{------------------------------------------------------------------------------- + Semigroup and Monoid +-------------------------------------------------------------------------------} + +instance ( forall k v. (Ord k, Eq v) => Semigroup (mk k v) + , LedgerTableConstraints l + ) => Semigroup (LedgerTables l mk) where + (<>) :: LedgerTables l mk -> LedgerTables l mk -> LedgerTables l mk + (<>) = ltliftA2 (<>) + +instance ( forall k v. 
(Ord k, Eq v) => Monoid (mk k v) + , LedgerTableConstraints l + ) => Monoid (LedgerTables l mk) where + mempty :: LedgerTables l mk + mempty = ltpure mempty + +{------------------------------------------------------------------------------- + Lifted functions +-------------------------------------------------------------------------------} + +-- | Lifted functions +-- +-- Similar to '(-.->)', but for @f@ and @g@ that are bifunctors. +type (-..->) :: (k1 -> k2 -> Type) -> (k1 -> k2 -> Type) -> k1 -> k2 -> Type +newtype (f -..-> g) a b = Fn2 { apFn2 :: f a b -> g a b } + +infixr 1 -..-> + +-- | Construct a lifted function. +fn2_1 :: (f a b -> g a b) -> (f -..-> g) a b +fn2_1 = Fn2 + +-- | Construct a binary lifted function +fn2_2 :: (f a b -> f' a b -> f'' a b ) -> (f -..-> f' -..-> f'') a b +fn2_2 f = Fn2 $ \x -> Fn2 $ \x' -> f x x' + +-- | Construct a ternary lifted function. +fn2_3 :: + (f a b -> f' a b -> f'' a b -> f''' a b) + -> (f -..-> f' -..-> f'' -..-> f''') a b +fn2_3 f = Fn2 $ \x -> Fn2 $ \x' -> Fn2 $ \x'' -> f x x' x'' + +-- | Construct a quaternary lifted function. +fn2_4 :: + (f a b -> f' a b -> f'' a b -> f''' a b -> f'''' a b) + -> (f -..-> f' -..-> f'' -..-> f''' -..-> f'''') a b +fn2_4 f = Fn2 $ \x -> Fn2 $ \x' -> Fn2 $ \x'' -> Fn2 $ \x''' -> f x x' x'' x''' + +{------------------------------------------------------------------------------- + Basic bifunctors +-------------------------------------------------------------------------------} + +-- | The constant type bifunctor. +type K2 :: Type -> k1 -> k2 -> Type +newtype K2 a b c = K2 { unK2 :: a } + deriving stock (Show, Eq) + deriving stock (Functor, Foldable, Traversable) + deriving newtype (Monoid, Semigroup) + +instance Bifunctor (K2 a) where + bimap _ _ (K2 x) = K2 x + +-- | Composition of functor after bifunctor. +-- +-- Example: @Comp2 (Just (17, True)) :: (Maybe :..: (,)) Int Bool@ +type (:..:) :: (k3 -> Type) -> (k1 -> k2 -> k3) -> k1 -> k2 -> Type +newtype (:..:) f g a b = Comp2 { unComp2 :: f (g a b) } + deriving stock (Show, Eq) + deriving stock (Functor, Foldable) + deriving newtype (Monoid, Semigroup) + +infixr 7 :..: + +deriving stock instance (Traversable f, Traversable (g a)) + => Traversable ((f :..: g) a) + +instance (Functor f, Bifunctor g) => Bifunctor (f :..: g) where + bimap f g (Comp2 x) = Comp2 $ fmap (bimap f g) x diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Diff.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Diff.hs new file mode 100644 index 0000000000..6ab7833202 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Diff.hs @@ -0,0 +1,227 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} + +module Ouroboros.Consensus.Ledger.Tables.Diff ( + -- * Types + Delta (..) + , Diff (..) 
+ -- * Conversion + , keysSet + -- * Construction + , diff + -- ** Maps + , fromMap + , fromMapDeletes + , fromMapInserts + -- ** Set + , fromSetDeletes + -- ** Lists + , fromList + , fromListDeletes + , fromListInserts + -- * Query + -- ** Size + , null + , numDeletes + , numInserts + , size + -- * Applying diffs + , applyDiff + , applyDiffForKeys + -- * Filter + , filterOnlyKey + , foldMapDelta + , fromAntiDiff + , toAntiDiff + , traverseDeltaWithKey_ + ) where + +import Control.Monad (void) +import Data.Bifunctor +import Data.Foldable (foldMap') +import qualified Data.Map.Diff.Strict.Internal as Anti +import qualified Data.Map.Merge.Strict as Merge +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Monoid +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Generics +import NoThunks.Class +import Prelude hiding (null) + +{------------------------------------------------------------------------------ + Types +------------------------------------------------------------------------------} + +newtype Diff k v = Diff (Map k (Delta v)) + deriving stock (Show, Eq) + deriving Generic + deriving newtype NoThunks + +-- | Custom 'Functor' instance, since @'Functor' ('Map' k)@ is actually the +-- 'Functor' instance for a lazy Map. +instance Functor (Diff k) where + fmap f (Diff m) = Diff $ Map.map (fmap f) m + +instance Ord k => Semigroup (Diff k v) where + (<>) :: Diff k v -> Diff k v -> Diff k v + (Diff m1) <> (Diff m2) = Diff $ Map.unionWith (<>) m1 m2 + +instance Ord k => Monoid (Diff k v) where + mempty :: Diff k v + mempty = Diff mempty + +data Delta v = + Insert !v + | Delete + deriving stock (Show, Eq, Functor) + deriving Generic + deriving NoThunks + +-- | Right-biased +instance Semigroup (Delta v) where + _d1 <> d2 = d2 + +{------------------------------------------------------------------------------ + Conversion +------------------------------------------------------------------------------} + +keysSet :: Diff k v -> Set k +keysSet (Diff m) = Map.keysSet m + +{------------------------------------------------------------------------------ + Construction +------------------------------------------------------------------------------} + +diff :: (Ord k, Eq v) => Map k v -> Map k v -> Diff k v +diff m1 m2 = Diff $ + Merge.merge + (Merge.mapMissing $ \_k _v -> Delete) + (Merge.mapMissing $ \_k v -> Insert v) + (Merge.zipWithMaybeMatched $ \ _k v1 v2 -> + if v1 == v2 then Nothing + else Just (Insert v2)) + m1 + m2 + +fromMap :: Map k (Delta v) -> Diff k v +fromMap = Diff + +fromMapInserts :: Map k v -> Diff k v +fromMapInserts = Diff . Map.map Insert + +fromMapDeletes :: Map k v -> Diff k v +fromMapDeletes = Diff . Map.map (const Delete) + +fromSetDeletes :: Set k -> Diff k v +fromSetDeletes = Diff . Map.fromSet (const Delete) + +fromList :: Ord k => [(k, Delta v)] -> Diff k v +fromList = Diff . Map.fromList + +fromListInserts :: Ord k => [(k, v)] -> Diff k v +fromListInserts = Diff . Map.fromList . fmap (second Insert) + +fromListDeletes :: Ord k => [(k, v)] -> Diff k v +fromListDeletes = Diff . Map.fromList . 
fmap (second (const Delete)) + +{------------------------------------------------------------------------------ + Query +------------------------------------------------------------------------------} + +null :: Diff k v -> Bool +null (Diff m) = Map.null m + +size :: Diff k v -> Int +size (Diff m) = Map.size m +numInserts :: Diff k v -> Int +numInserts (Diff m) = getSum $ foldMap' f m + where + f (Insert _) = 1 + f Delete = 0 + +numDeletes :: Diff k v -> Int +numDeletes (Diff m) = getSum $ foldMap' f m + where + f (Insert _) = 0 + f Delete = 1 + +{------------------------------------------------------------------------------ + Applying diffs +------------------------------------------------------------------------------} + +applyDiff :: + Ord k + => Map k v + -> Diff k v + -> Map k v +applyDiff m (Diff diffs) = + Merge.merge + Merge.preserveMissing + (Merge.mapMaybeMissing newKeys) + (Merge.zipWithMaybeMatched oldKeys) + m + diffs + where + newKeys :: k -> Delta v -> Maybe v + newKeys _k (Insert x) = Just x + newKeys _k Delete = Nothing + + oldKeys :: k -> v -> Delta v -> Maybe v + oldKeys _k _v1 (Insert x) = Just x + oldKeys _k _v1 Delete = Nothing + +applyDiffForKeys :: + Ord k + => Map k v + -> Set k + -> Diff k v + -> Map k v +applyDiffForKeys m ks (Diff diffs) = + applyDiff + m + (Diff $ diffs `Map.restrictKeys` (Map.keysSet m `Set.union` ks)) + +{------------------------------------------------------------------------------- + Filter +-------------------------------------------------------------------------------} + +filterOnlyKey :: (k -> Bool) -> Diff k v -> Diff k v +filterOnlyKey f (Diff m) = Diff $ Map.filterWithKey (const . f) m + +{------------------------------------------------------------------------------- + From-to anti-diffs +-------------------------------------------------------------------------------} + +fromAntiDiff :: Anti.Diff k v -> Diff k v +fromAntiDiff (Anti.Diff d) = Diff (Map.map (f . Anti.last) d) + where + f (Anti.Insert v) = Insert v + f Anti.Delete{} = Delete + +toAntiDiff :: Diff k v -> Anti.Diff k v +toAntiDiff (Diff d) = Anti.Diff (Map.map f d) + where + f (Insert v) = Anti.singletonInsert v + f Delete = Anti.singletonDelete + +{------------------------------------------------------------------------------- + Traversals and folds +-------------------------------------------------------------------------------} + +-- | Traversal with keys over the deltas. +traverseDeltaWithKey_ :: + Applicative t + => (k -> Delta v -> t a) + -> Diff k v + -> t () +traverseDeltaWithKey_ f (Diff m) = void $ Map.traverseWithKey f m + +-- | @'foldMap'@ over the deltas. +foldMapDelta :: Monoid m => (Delta v -> m) -> Diff k v -> m +foldMapDelta f (Diff m) = foldMap f m diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/DiffSeq.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/DiffSeq.hs new file mode 100644 index 0000000000..cdec6cdc1d --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/DiffSeq.hs @@ -0,0 +1,369 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DeriveTraversable #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE MonoLocalBinds #-} +{-# LANGUAGE MultiParamTypeClasses #-} + +{- | Sequences of diffs for ledger tables. 
+ + These diff sequences are an instantiation of a strict finger tree with root + measures. The tree/sequence itself contains diffs and slot information, while + the root measure is the total sum of all diffs in the sequence. The internal + measure is used to keep track of sequence length and maximum slot numbers. + + The diff datatype that we use forms a cancellative monoid, which allows for + relatively efficient splitting of finger trees with respect to recomputing + measures by means of subtracting diffs using the 'stripPrefix' and + 'stripSuffix' functions that cancellative monoids provide. Namely, if either + the left or right part of the split is small in comparison with the input + sequence, then we can subtract the diffs in the smaller part from the root + measure of the input to (quickly) compute the root measure of the /other/ + part of the split. This is much faster than computing the root measures from + scratch by doing a linear-time pass over the elements of the split parts, or + a logarithmic-time pass over intermediate sums of diffs in case we store + cumulative diffs in the nodes of the finger tree. + + === Example of fast splits + + As an analogy, consider this example: we have a sequence of consecutive + integer numbers @xs = [1..n]@ where @n@ is large, and we define the root + measure of the sequence to be the total sum of these numbers, @rmxs = sum + [1..n]@ (we assume @rmxs@ is fully evaluated). Say we split this sequence of + integer numbers at the index @2@, then we get /left/ and /right/ parts of the + split @ys@ and @zs@ respectively. + + > splitAt 2 xs = (ys, zs) = ([1..2], [3..n]) + + How should we compute we the root measure @rmys@ of @ys@? Since @ys@ is + small, we can just compute @rmys = sum [1..2]@. How should we compute the + root measure @rmzs@ of @zs@? We should not compute @rmzs = sum [3..n]@ in + this case, since @n@ is large. Instead, we compute @rmzs = rmxs - rmys@, + which evaluates to its result in time that is linear in the length of @ys@, + in this case @O(1)@. + + === Why not store sums of diffs in the internal measure instead of the root + measure? + + We could also have used the interal measure of the strict finger tree to + store intermediate sums of diffs for all subtrees of the node. The subtree + rooted at the root of the tree would then store the total sum of diffs. + However, we would have now to recompute a possibly logarithmic number of sums + of diffs when we split or extend the sequence. Given that in @consensus@ we + use the total sum of diffs nearly as often as we split or extend the diff + sequence, this proved to be too costly. The single-instance root measure + reduces the overhead of this "caching" of intermediate sums of diffs by only + using a single total sum of diffs, though augmented with 'stripPrefix' and + 'stripSuffix' operations to facilitate computing updated root measures. + +-} +module Ouroboros.Consensus.Ledger.Tables.DiffSeq ( + -- * Sequences of diffs + DiffSeq (..) + , Element (..) + , InternalMeasure (..) + , Length (..) + , RootMeasure (..) + , SlotNoLB (..) + , SlotNoUB (..) 
+ -- * Short-hands for type-class constraints + , SM + -- * Queries + , cumulativeDiff + , length + , numDeletes + , numInserts + -- * Construction + , append + , empty + , extend + -- * Slots + , maxSlot + , minSlot + -- * Splitting + , split + , splitAt + , splitAtFromEnd + , splitAtSlot + ) where + +import qualified Cardano.Slotting.Slot as Slot +import qualified Control.Exception as Exn +import Data.Bifunctor (Bifunctor (bimap)) +import Data.FingerTree.RootMeasured.Strict hiding (split) +import qualified Data.FingerTree.RootMeasured.Strict as RMFT (splitSized) +import Data.Map.Diff.Strict (Diff) +import qualified Data.Map.Diff.Strict as Diff +import Data.Maybe.Strict +import Data.Monoid (Sum (..)) +import Data.Semigroup (Max (..), Min (..)) +import Data.Semigroup.Cancellative +import GHC.Generics (Generic) +import GHC.Stack (HasCallStack) +import NoThunks.Class (NoThunks) +import Ouroboros.Consensus.Util.Orphans () +import Prelude hiding (length, splitAt) + +{------------------------------------------------------------------------------- + Sequences of diffs +-------------------------------------------------------------------------------} + +-- | A sequence of key-value store differences. +-- +-- INVARIANT: The slot numbers of consecutive elements should be strictly +-- increasing. Manipulating the underlying @'StrictFingerTree'@ directly may +-- break this invariant. +newtype DiffSeq k v = + UnsafeDiffSeq + (StrictFingerTree + (RootMeasure k v) + (InternalMeasure k v) + (Element k v) + ) + deriving stock (Generic, Show, Eq) + deriving anyclass (NoThunks) + +-- The @'SlotNo'@ is not included in the root measure, since it is not a +-- cancellative monoid. +data RootMeasure k v = RootMeasure { + -- | Cumulative length + rmLength :: {-# UNPACK #-} !Length + -- | Cumulative diff + , rmDiff :: !(Diff k v) + -- | Cumulative number of inserts + , rmNumInserts :: !(Sum Int) + -- | Cumulative number of deletes + , rmNumDeletes :: !(Sum Int) + } + deriving stock (Generic, Show, Eq, Functor) + deriving anyclass (NoThunks) + +data InternalMeasure k v = InternalMeasure { + -- | Cumulative length + imLength :: {-# UNPACK #-} !Length + -- | Leftmost slot number (or lower bound) + -- + -- Empty diff sequences have no rightmost slot number, so in that case + -- @imSlotNo == Nothing@. + , imSlotNoL :: !(StrictMaybe SlotNoLB) + -- | Rightmost slot number (or upper bound) + -- + -- Empty diff sequences have no leftmost slot number, so in that case + -- @imSlotNo == Nothing@. + , imSlotNoR :: !(StrictMaybe SlotNoUB) + } + deriving stock (Generic, Show, Eq, Functor) + deriving anyclass (NoThunks) + +data Element k v = Element { + elSlotNo :: {-# UNPACK #-} !Slot.SlotNo + , elDiff :: !(Diff k v) + } + deriving stock (Generic, Show, Eq, Functor) + deriving anyclass (NoThunks) + +-- | Length of a sequence of differences. +newtype Length = Length { unLength :: Int } + deriving stock (Generic, Show, Eq, Ord) + deriving newtype (Num) + deriving anyclass (NoThunks) + deriving Semigroup via Sum Int + deriving Monoid via Sum Int + deriving (LeftReductive, RightReductive) via Sum Int + deriving (LeftCancellative, RightCancellative) via Sum Int + +-- | An upper bound on slot numbers. +newtype SlotNoUB = SlotNoUB {unSlotNoUB :: Slot.SlotNo} + deriving stock (Generic, Show, Eq, Ord) + deriving newtype (Num) + deriving anyclass (NoThunks) + deriving Semigroup via Max Slot.SlotNo + deriving Monoid via Max Slot.SlotNo + +-- | A lower bound on slot numbers. 
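+--
+-- Together with 'SlotNoUB', this is used by 'noSlotBoundsIntersect' (below) to
+-- assert that slot numbers do not decrease when extending or appending diff
+-- sequences.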
+newtype SlotNoLB = SlotNoLB {unSlotNoLB :: Slot.SlotNo} + deriving stock (Generic, Show, Eq, Ord) + deriving newtype (Num) + deriving anyclass (NoThunks) + deriving Semigroup via Min Slot.SlotNo + deriving Monoid via Min Slot.SlotNo + +-- TODO: once EBBs are removed, this can be a strict inequality. +noSlotBoundsIntersect :: SlotNoUB -> SlotNoLB -> Bool +noSlotBoundsIntersect (SlotNoUB sl1) (SlotNoLB sl2) = sl1 <= sl2 + +{------------------------------------------------------------------------------- + Root measuring +-------------------------------------------------------------------------------} + +instance (Ord k, Eq v) => RootMeasured (RootMeasure k v) (Element k v) where + measureRoot (Element _ d) = + RootMeasure 1 d (Sum $ Diff.numInserts d) (Sum $ Diff.numDeletes d) + +instance (Ord k, Eq v) => Semigroup (RootMeasure k v) where + RootMeasure len1 d1 n1 m1 <> RootMeasure len2 d2 n2 m2 = + RootMeasure (len1 <> len2) (d1 <> d2) (n1 <> n2) (m1 <> m2) + +instance (Ord k, Eq v) => Monoid (RootMeasure k v) where + mempty = RootMeasure mempty mempty mempty mempty + +instance (Ord k, Eq v) => LeftReductive (RootMeasure k v) where + stripPrefix (RootMeasure len1 d1 n1 m1) (RootMeasure len2 d2 n2 m2) = + RootMeasure <$> stripPrefix len1 len2 <*> stripPrefix d1 d2 + <*> stripPrefix n1 n2 <*> stripPrefix m1 m2 + +instance (Ord k, Eq v) => RightReductive (RootMeasure k v) where + stripSuffix (RootMeasure len1 d1 n1 m1) (RootMeasure len2 d2 n2 m2) = + RootMeasure <$> stripSuffix len1 len2 <*> stripSuffix d1 d2 + <*> stripSuffix n1 n2 <*> stripSuffix m1 m2 + +instance (Ord k, Eq v) => LeftCancellative (RootMeasure k v) +instance (Ord k, Eq v) => RightCancellative (RootMeasure k v) + +{------------------------------------------------------------------------------- + Internal measuring +-------------------------------------------------------------------------------} + +instance Measured (InternalMeasure k v) (Element k v) where + measure (Element sl _d) = InternalMeasure { + imLength = 1 + , imSlotNoL = SJust $ SlotNoLB sl + , imSlotNoR = SJust $ SlotNoUB sl + } + +instance Semigroup (InternalMeasure k v) where + InternalMeasure len1 sl1L sl1R <> InternalMeasure len2 sl2L sl2R = + InternalMeasure (len1 <> len2) (sl1L <> sl2L) (sl1R <> sl2R) + +instance Monoid (InternalMeasure k v) where + mempty = InternalMeasure mempty mempty mempty + +{------------------------------------------------------------------------------- + Short-hands types and constraints +-------------------------------------------------------------------------------} + +-- | Short-hand for @'SuperMeasured'@. +type SM k v = + SuperMeasured (RootMeasure k v) (InternalMeasure k v) (Element k v) + +{------------------------------------------------------------------------------- + Queries +-------------------------------------------------------------------------------} + +cumulativeDiff :: + SM k v + => DiffSeq k v + -> Diff k v +cumulativeDiff (UnsafeDiffSeq ft) = rmDiff $ measureRoot ft + +length :: + SM k v + => DiffSeq k v -> Int +length (UnsafeDiffSeq ft) = unLength . 
rmLength $ measureRoot ft + +numInserts :: + SM k v + => DiffSeq k v -> Sum Int +numInserts (UnsafeDiffSeq ft) = rmNumInserts $ measureRoot ft + +numDeletes :: + SM k v + => DiffSeq k v -> Sum Int +numDeletes (UnsafeDiffSeq ft) = rmNumDeletes $ measureRoot ft + +{------------------------------------------------------------------------------- + Construction +-------------------------------------------------------------------------------} + +extend :: + SM k v + => DiffSeq k v + -> Slot.SlotNo + -> Diff k v + -> DiffSeq k v +extend (UnsafeDiffSeq ft) sl d = + Exn.assert invariant $ UnsafeDiffSeq $ ft |> Element sl d + where + invariant = case imSlotNoR $ measure ft of + SNothing -> True + SJust slR -> noSlotBoundsIntersect slR (SlotNoLB sl) + +append :: + (Ord k, Eq v) + => DiffSeq k v + -> DiffSeq k v + -> DiffSeq k v +append (UnsafeDiffSeq ft1) (UnsafeDiffSeq ft2) = + Exn.assert invariant $ UnsafeDiffSeq (ft1 <> ft2) + where + sl1R = imSlotNoR $ measure ft1 + sl2L = imSlotNoL $ measure ft2 + invariant = case noSlotBoundsIntersect <$> sl1R <*> sl2L of + SNothing -> True + SJust v -> v + +empty :: + (Ord k, Eq v) + => DiffSeq k v +empty = UnsafeDiffSeq mempty + +{------------------------------------------------------------------------------- + Slots +-------------------------------------------------------------------------------} + +maxSlot :: + SM k v + => DiffSeq k v + -> StrictMaybe Slot.SlotNo +maxSlot (UnsafeDiffSeq ft) = unSlotNoUB <$> imSlotNoR (measure ft) + +minSlot :: + SM k v + => DiffSeq k v + -> StrictMaybe Slot.SlotNo +minSlot (UnsafeDiffSeq ft) = unSlotNoLB <$> imSlotNoL (measure ft) + +{------------------------------------------------------------------------------- + Splitting +-------------------------------------------------------------------------------} + +instance Sized (InternalMeasure k v) where + size = unLength . imLength + +splitAtSlot :: + SM k v + => Slot.SlotNo + -> DiffSeq k v + -> (DiffSeq k v, DiffSeq k v) +splitAtSlot slot = + split (strictMaybe False (slot <=) . fmap unSlotNoUB . imSlotNoR) + +split :: + SM k v + => (InternalMeasure k v -> Bool) + -> DiffSeq k v + -> (DiffSeq k v, DiffSeq k v) +split p (UnsafeDiffSeq ft) = bimap UnsafeDiffSeq UnsafeDiffSeq $ + RMFT.splitSized p ft + +splitAt :: + SM k v + => Int + -> DiffSeq k v + -> (DiffSeq k v, DiffSeq k v) +splitAt n = split ((Length n<) . imLength) + +splitAtFromEnd :: + (SM k v, HasCallStack) + => Int + -> DiffSeq k v + -> (DiffSeq k v, DiffSeq k v) +splitAtFromEnd n dseq = + if n <= len + then splitAt (len - n) dseq + else error $ "Can't split a seq of length " ++ show len ++ " from end at " ++ show n + where + len = length dseq diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/MapKind.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/MapKind.hs new file mode 100644 index 0000000000..8ff9259bfe --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/MapKind.hs @@ -0,0 +1,205 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} + +-- | Classes for 'MapKind's and concrete 'MapKind's +module Ouroboros.Consensus.Ledger.Tables.MapKind ( + -- * Classes + CanMapKeysMK (..) + , CanMapMK (..) 
+ , EqMK + , NoThunksMK + , ShowMK + , ZeroableMK (..) + -- * Concrete MapKinds + , CodecMK (..) + , DiffMK (..) + , EmptyMK (..) + , KeysMK (..) + , SeqDiffMK (..) + , TrackingMK (..) + , ValuesMK (..) + ) where + +import qualified Codec.CBOR.Decoding as CBOR +import qualified Codec.CBOR.Encoding as CBOR +import Data.Kind (Constraint) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Generics (Generic) +import NoThunks.Class +import Ouroboros.Consensus.Ledger.Tables.Basics +import Ouroboros.Consensus.Ledger.Tables.Diff (Diff (..)) +import Ouroboros.Consensus.Ledger.Tables.DiffSeq + +{------------------------------------------------------------------------------- + Classes +-------------------------------------------------------------------------------} + +type ZeroableMK :: MapKind -> Constraint +class ZeroableMK mk where + emptyMK :: forall k v. (Ord k, Eq v) => mk k v + +type CanMapMK :: MapKind -> Constraint +class CanMapMK mk where + mapMK :: (v -> v') -> mk k v -> mk k v' + +type CanMapKeysMK :: MapKind -> Constraint +class CanMapKeysMK mk where + mapKeysMK :: Ord k' => (k -> k') -> mk k v -> mk k' v + +-- | For convenience, such that we don't have to include @QuantifiedConstraints@ +-- everywhere. +type ShowMK :: MapKind -> Constraint +class (forall k v. (Show k, Show v) => Show (mk k v)) => ShowMK mk + +-- | For convenience, such that we don't have to include @QuantifiedConstraints@ +-- everywhere. +type EqMK :: MapKind -> Constraint +class (forall k v. (Eq k, Eq v) => Eq (mk k v)) => EqMK mk + +-- | For convenience, such that we don't have to include @QuantifiedConstraints@ +-- everywhere. +type NoThunksMK :: MapKind -> Constraint +class (forall k v. (NoThunks k, NoThunks v) => NoThunks (mk k v)) + => NoThunksMK mk + +{------------------------------------------------------------------------------- + EmptyMK +-------------------------------------------------------------------------------} + +data EmptyMK k v = EmptyMK + deriving stock (Generic, Eq, Show) + deriving anyclass NoThunks + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK EmptyMK where + emptyMK = EmptyMK + +instance CanMapMK EmptyMK where + mapMK _ EmptyMK = EmptyMK + +instance CanMapKeysMK EmptyMK where + mapKeysMK _ EmptyMK = EmptyMK + +{------------------------------------------------------------------------------- + KeysMK +-------------------------------------------------------------------------------} + +newtype KeysMK k v = KeysMK (Set k) + deriving stock (Generic, Eq, Show) + deriving newtype (Semigroup, Monoid) + deriving anyclass NoThunks + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK KeysMK where + emptyMK = KeysMK mempty + +instance CanMapMK KeysMK where + mapMK _ (KeysMK ks) = KeysMK ks + +instance CanMapKeysMK KeysMK where + mapKeysMK f (KeysMK ks) = KeysMK $ Set.map f ks + +{------------------------------------------------------------------------------- + ValuesMK +-------------------------------------------------------------------------------} + +newtype ValuesMK k v = ValuesMK { getValuesMK :: Map k v } + deriving stock (Generic, Eq, Show) + deriving anyclass NoThunks + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK ValuesMK where + emptyMK = ValuesMK mempty + +instance CanMapMK ValuesMK where + mapMK f (ValuesMK vs) = ValuesMK $ Map.map f vs + +instance CanMapKeysMK ValuesMK where + mapKeysMK f (ValuesMK vs) = ValuesMK $ Map.mapKeys f vs + 
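+-- A small illustrative sketch (not part of the API) of the classes above, as
+-- instantiated by 'ValuesMK': 'mapMK' maps over the values of the wrapped
+-- 'Map', while 'mapKeysMK' maps over its keys.
+--
+-- > mapMK     show (ValuesMK (Map.fromList [(1, True)]))
+-- >   == ValuesMK (Map.fromList [(1, "True")])
+-- > mapKeysMK show (ValuesMK (Map.fromList [(1, True)]))
+-- >   == ValuesMK (Map.fromList [("1", True)])
+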
+{------------------------------------------------------------------------------- + DiffMK +-------------------------------------------------------------------------------} + +newtype DiffMK k v = DiffMK { getDiffMK :: Diff k v } + deriving stock (Generic, Eq, Show) + deriving newtype Functor + deriving anyclass NoThunks + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK DiffMK where + emptyMK = DiffMK mempty + +instance CanMapKeysMK DiffMK where + mapKeysMK f (DiffMK (Diff m)) = DiffMK . Diff $ + Map.mapKeys f m + +instance CanMapMK DiffMK where + mapMK f (DiffMK d) = DiffMK $ fmap f d + +{------------------------------------------------------------------------------- + TrackingMK +-------------------------------------------------------------------------------} + +data TrackingMK k v = TrackingMK !(Map k v) !(Diff k v) + deriving (Generic, Eq, Show, NoThunks) + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK TrackingMK where + emptyMK = TrackingMK mempty mempty + +instance CanMapMK TrackingMK where + mapMK f (TrackingMK vs d) = TrackingMK (fmap f vs) (fmap f d) + +instance CanMapKeysMK TrackingMK where + mapKeysMK f (TrackingMK vs d) = + TrackingMK + (getValuesMK . mapKeysMK f . ValuesMK $ vs) + (getDiffMK . mapKeysMK f . DiffMK $ d) + +{------------------------------------------------------------------------------- + SeqDiffMK +-------------------------------------------------------------------------------} + +newtype SeqDiffMK k v = SeqDiffMK { getSeqDiffMK :: DiffSeq k v } + deriving stock (Generic, Eq, Show) + deriving anyclass NoThunks + deriving anyclass (ShowMK, EqMK, NoThunksMK) + +instance ZeroableMK SeqDiffMK where + emptyMK = SeqDiffMK empty + +{------------------------------------------------------------------------------- + CodecMK +-------------------------------------------------------------------------------} + +-- | A codec 'MapKind' that will be used to refer to @'LedgerTables' l CodecMK@ +-- as the codecs that can encode every key and value in the @'LedgerTables' l +-- mk@. +-- +-- It is important to note that in the context of the HardForkCombinator, the +-- key @k@ has to be accessible from any era we are currently in, regardless of +-- which era it was created in. Because of that, we need that the serialization +-- of the key remains stable accross eras. +-- +-- Ledger will provide more efficient encoders than CBOR, which will produce a +-- @'ShortByteString'@ directly. +-- +-- See also 'HasCanonicalTxIn' in +-- "Ouroboros.Consensus.HardFork.Combinator.Ledger". +data CodecMK k v = CodecMK { + encodeKey :: !(k -> CBOR.Encoding) + , encodeValue :: !(v -> CBOR.Encoding) + , decodeKey :: !(forall s . CBOR.Decoder s k) + , decodeValue :: !(forall s . CBOR.Decoder s v) + } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Utils.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Utils.hs new file mode 100644 index 0000000000..13be4c536b --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/Tables/Utils.hs @@ -0,0 +1,327 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} + +-- | A collection of useful combinators to shorten the code in other places. +-- +-- TODO: #4394 provide better ergonomics. This whole module provides ways to +-- combine tables of two ledger states to produce another one. 
It is written +-- very much ad-hoc and we should probably think of some way to make this more +-- ergonomic. In particular for functions that take two ledger states, it is +-- unclear if it will keep the in-memory part of the first or the second one. +module Ouroboros.Consensus.Ledger.Tables.Utils ( + -- * Projection and injection + ltprj + , over + -- * Utils aliases: tables + , applyDiffForKeys + , applyDiffForKeysOnTables + , applyDiffs + , applyDiffs' + , attachAndApplyDiffs + , attachAndApplyDiffs' + , attachEmptyDiffs + , calculateAdditions + , calculateDifference + , calculateDifference' + , emptyLedgerTables + , forgetLedgerTables + , forgetTrackingDiffs + , forgetTrackingValues + , noNewTickingDiffs + , prependDiffs + , prependDiffs' + , prependTrackingDiffs + , prependTrackingDiffs' + , reapplyTracking + , restrictValues + , restrictValues' + -- * Testing + , rawApplyDiffs + , rawAttachAndApplyDiffs + , rawAttachEmptyDiffs + , rawCalculateDifference + , rawForgetTrackingDiffs + , rawForgetTrackingValues + , rawPrependDiffs + , rawPrependTrackingDiffs + , rawReapplyTracking + , rawRestrictValues + ) where + +import qualified Data.Map.Strict as Map +import Ouroboros.Consensus.Ledger.Tables +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff + +{------------------------------------------------------------------------------- + Projection and injection +-------------------------------------------------------------------------------} + +over :: + ( HasLedgerTables l + , CanMapMK mk' + , CanMapKeysMK mk' + , ZeroableMK mk' + ) + => l mk + -> LedgerTables l mk' + -> l mk' +over = withLedgerTables + +ltprj :: + (HasLedgerTables l, Castable l l', CanMapMK mk, CanMapKeysMK mk, ZeroableMK mk) + => l mk + -> LedgerTables l' mk +ltprj = castLedgerTables . projectLedgerTables + +{------------------------------------------------------------------------------- + Utils aliases: tables +-------------------------------------------------------------------------------} + +-- | When applying a block that is not on an era transition, ticking won't +-- generate new values, so this function can be used to wrap the call to the +-- ledger rules that perform the tick. 
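+--
+-- In other words, the returned state carries an empty 'DiffMK' table for each
+-- ledger table.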
+noNewTickingDiffs :: HasLedgerTables l + => l any + -> l DiffMK +noNewTickingDiffs l = withLedgerTables l emptyLedgerTables + +forgetLedgerTables :: HasLedgerTables l => l mk -> l EmptyMK +forgetLedgerTables l = withLedgerTables l emptyLedgerTables + +-- | Empty values for every table +emptyLedgerTables :: (ZeroableMK mk, LedgerTableConstraints l) => LedgerTables l mk +emptyLedgerTables = ltpure emptyMK + +-- +-- Forget parts of 'TrackingMK' +-- + +rawForgetTrackingValues :: TrackingMK k v -> DiffMK k v +rawForgetTrackingValues (TrackingMK _vs d) = DiffMK d + +forgetTrackingValues :: (HasLedgerTables l, LedgerTableConstraints l) => l TrackingMK -> l DiffMK +forgetTrackingValues l = over l $ ltmap rawForgetTrackingValues (ltprj l) + +-- +-- Forget diffs +-- + +rawForgetTrackingDiffs :: TrackingMK k v -> ValuesMK k v +rawForgetTrackingDiffs (TrackingMK vs _ds) = ValuesMK vs + +forgetTrackingDiffs :: (LedgerTableConstraints l, HasLedgerTables l) => l TrackingMK -> l ValuesMK +forgetTrackingDiffs l = over l $ ltmap rawForgetTrackingDiffs (ltprj l) + +-- +-- Prepend diffs +-- + +rawPrependDiffs :: + Ord k + => DiffMK k v -- ^ Earlier differences + -> DiffMK k v -- ^ Later differences + -> DiffMK k v +rawPrependDiffs (DiffMK d1) (DiffMK d2) = DiffMK (d1 <> d2) + +-- | Prepend diffs from the first ledger state to the diffs from the second +-- ledger state. Returns ledger tables. +prependDiffs' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l DiffMK -> l' DiffMK -> LedgerTables l'' DiffMK +prependDiffs' l1 l2 = ltliftA2 rawPrependDiffs (ltprj l1) (ltprj l2) + +-- | Like 'prependDiffs'', but puts the ledger tables inside the second ledger +-- state. +prependDiffs :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l DiffMK -> l' DiffMK -> l' DiffMK +prependDiffs l1 l2 = over l2 $ prependDiffs' l1 l2 + +-- +-- Apply diffs +-- + +rawApplyDiffs :: + Ord k + => ValuesMK k v -- ^ Values to which differences are applied + -> DiffMK k v -- ^ Differences to apply + -> ValuesMK k v +rawApplyDiffs (ValuesMK vals) (DiffMK diffs) = ValuesMK (Diff.applyDiff vals diffs) + +-- | Apply diffs from the second ledger state to the values of the first ledger +-- state. Returns ledger tables. +applyDiffs' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' DiffMK -> LedgerTables l'' ValuesMK +applyDiffs' l1 l2 = ltliftA2 rawApplyDiffs (ltprj l1) (ltprj l2) + +-- | Like 'applyDiffs'', but puts the ledger tables inside the second ledger +-- state. 
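+--
+-- At the level of a single table this amounts to 'Diff.applyDiff'; for
+-- example (an illustrative sketch with hypothetical keys and values):
+--
+-- > rawApplyDiffs (ValuesMK (Map.fromList [(1, 'a'), (2, 'b')]))
+-- >               (DiffMK   (Diff.fromList [(2, Diff.Delete), (3, Diff.Insert 'c')]))
+-- >   == ValuesMK (Map.fromList [(1, 'a'), (3, 'c')])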
+applyDiffs :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' DiffMK -> l' ValuesMK +applyDiffs l1 l2 = over l2 $ applyDiffs' l1 l2 + +rawApplyDiffForKeys :: + Ord k + => ValuesMK k v + -> KeysMK k v + -> DiffMK k v + -> ValuesMK k v +rawApplyDiffForKeys (ValuesMK vals) (KeysMK keys) (DiffMK diffs) = + ValuesMK (Diff.applyDiffForKeys vals keys diffs) + +applyDiffForKeys' :: + (Castable l l'', Castable l l', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> LedgerTables l KeysMK -> l' DiffMK -> LedgerTables l'' ValuesMK +applyDiffForKeys' l1 l2 l3 = ltliftA3 rawApplyDiffForKeys (ltprj l1) (castLedgerTables l2) (ltprj l3) + +applyDiffForKeys :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> LedgerTables l KeysMK -> l' DiffMK -> l' ValuesMK +applyDiffForKeys l1 l2 l3 = over l3 $ applyDiffForKeys' l1 l2 l3 + +applyDiffForKeys'onTables :: + (Castable l l'', Castable l l', HasLedgerTables l, HasLedgerTables l') + => LedgerTables l ValuesMK -> LedgerTables l KeysMK -> l' DiffMK -> LedgerTables l'' ValuesMK +applyDiffForKeys'onTables l1 l2 l3 = ltliftA3 rawApplyDiffForKeys (castLedgerTables l1) (castLedgerTables l2) (ltprj l3) + +applyDiffForKeysOnTables :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => LedgerTables l ValuesMK -> LedgerTables l KeysMK -> l' DiffMK -> l' ValuesMK +applyDiffForKeysOnTables l1 l2 l3 = over l3 $ applyDiffForKeys'onTables l1 l2 l3 + +-- +-- Calculate differences +-- + +rawCalculateDifference :: + (Ord k, Eq v) + => ValuesMK k v + -> ValuesMK k v + -> TrackingMK k v +rawCalculateDifference (ValuesMK before) (ValuesMK after) = TrackingMK after (Diff.diff before after) + +calculateAdditions :: + (LedgerTableConstraints l, HasLedgerTables l) + => l ValuesMK -> l TrackingMK +calculateAdditions l = over l $ ltliftA (rawCalculateDifference emptyMK) (ltprj l) + +-- | Calculate the differences between two ledger states. The first ledger state +-- is considered /before/, the second ledger state is considered /after/. +-- Returns ledger tables. +calculateDifference' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' ValuesMK -> LedgerTables l'' TrackingMK +calculateDifference' l1 l2 = ltliftA2 rawCalculateDifference (ltprj l1) (ltprj l2) + +-- | Like 'calculcateDifference'', but puts the ledger tables inside the second +-- leger state. +calculateDifference :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' ValuesMK -> l' TrackingMK +calculateDifference l1 l2 = over l2 $ calculateDifference' l1 l2 + +-- +-- Attaching and/or applying diffs +-- + +rawAttachAndApplyDiffs :: + Ord k + => DiffMK k v + -> ValuesMK k v + -> TrackingMK k v +rawAttachAndApplyDiffs (DiffMK d) (ValuesMK v) = TrackingMK (Diff.applyDiff v d) d + +-- | Apply the differences from the first ledger state to the values of the +-- second ledger state, and returns the resulting values together with the +-- applied diff. +attachAndApplyDiffs' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l DiffMK -> l' ValuesMK -> LedgerTables l'' TrackingMK +attachAndApplyDiffs' l1 l2 = ltliftA2 rawAttachAndApplyDiffs (ltprj l1) (ltprj l2) + +-- | Like 'attachAndApplyDiffs'', but puts the ledger tables inside the first +-- leger state. 
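+--
+-- At the level of a single table the result pairs the updated values with the
+-- diff that produced them, i.e. (sketch):
+--
+-- > rawAttachAndApplyDiffs (DiffMK d) (ValuesMK v)
+-- >   == TrackingMK (Diff.applyDiff v d) d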
+attachAndApplyDiffs :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l DiffMK -> l' ValuesMK -> l TrackingMK +attachAndApplyDiffs l1 l2 = over l1 $ attachAndApplyDiffs' l1 l2 + +rawAttachEmptyDiffs :: Ord k => ValuesMK k v -> TrackingMK k v +rawAttachEmptyDiffs (ValuesMK v) = TrackingMK v mempty + +-- | Make a 'TrackingMK' with empty diffs. +attachEmptyDiffs :: HasLedgerTables l => l ValuesMK -> l TrackingMK +attachEmptyDiffs l1 = over l1 $ ltmap rawAttachEmptyDiffs (ltprj l1) + +-- +-- Prepend tracking diffs +-- + +-- | Prepend the former tracking diffs to the latter tracking diffs. Keep the +-- second tracking values. +-- +-- PRECONDITION: Given that the first argument is @TrackingMK v1 d1@, and the +-- second argument is @TrackingMK v2 d2@, it should be the case that @applyDiff +-- v1 d2 == v2@. +rawPrependTrackingDiffs :: + Ord k + => TrackingMK k v + -> TrackingMK k v + -> TrackingMK k v +rawPrependTrackingDiffs (TrackingMK _ d1) (TrackingMK v d2) = + TrackingMK v (d1 <> d2) + +-- | Prepend tracking diffs from the first ledger state to the tracking diffs +-- from the second ledger state. Keep the tracking values of the second ledger +-- state. +-- +-- PRECONDITION: See 'rawPrependTrackingDiffs'. +prependTrackingDiffs' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l TrackingMK -> l' TrackingMK -> LedgerTables l'' TrackingMK +prependTrackingDiffs' l1 l2 = ltliftA2 rawPrependTrackingDiffs (ltprj l1) (ltprj l2) + +-- | Like 'prependTrackingDiffs'', but puts the ledger tables inside the second +-- leger state. +prependTrackingDiffs :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l TrackingMK -> l' TrackingMK -> l' TrackingMK +prependTrackingDiffs l1 l2 = over l2 $ prependTrackingDiffs' l1 l2 + +-- Reapply tracking diffs + +rawReapplyTracking :: + Ord k + => TrackingMK k v + -> ValuesMK k v + -> TrackingMK k v +rawReapplyTracking (TrackingMK _v d) (ValuesMK v) = TrackingMK (Diff.applyDiff v d) d + +-- | Replace the tables in the first parameter with the tables of the second +-- parameter after applying the differences in the first parameter to them +reapplyTracking :: LedgerTableConstraints l => LedgerTables l TrackingMK -> LedgerTables l ValuesMK -> LedgerTables l TrackingMK +reapplyTracking = ltliftA2 rawReapplyTracking + +-- Restrict values + +rawRestrictValues :: + Ord k + => ValuesMK k v + -> KeysMK k v + -> ValuesMK k v +rawRestrictValues (ValuesMK v) (KeysMK k) = ValuesMK $ v `Map.restrictKeys` k + +restrictValues' :: + (Castable l l'', Castable l' l'', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' KeysMK -> LedgerTables l'' ValuesMK +restrictValues' l1 l2 = ltliftA2 rawRestrictValues (ltprj l1) (ltprj l2) + +restrictValues :: + (Castable l l', HasLedgerTables l, HasLedgerTables l') + => l ValuesMK -> l' KeysMK -> l ValuesMK +restrictValues l1 l2 = over l1 $ ltliftA2 rawRestrictValues (ltprj l1) (ltprj l2) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool.hs index fed42f2a46..1e015dca12 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool.hs @@ -4,6 +4,7 @@ module Ouroboros.Consensus.Mempool ( -- ** Mempool Mempool (..) -- ** Transaction adding + , AddTxOnBehalfOf (..) , MempoolAddTxResult (..) 
, addLocalTxs , addTxs @@ -33,11 +34,11 @@ module Ouroboros.Consensus.Mempool ( , TraceEventMempool (..) ) where -import Ouroboros.Consensus.Mempool.API (ForgeLedgerState (..), - Mempool (..), MempoolAddTxResult (..), - MempoolSnapshot (..), SizeInBytes, TicketNo, addLocalTxs, - addTxs, isMempoolTxAdded, isMempoolTxRejected, - mempoolTxAddedToMaybe, zeroTicketNo) +import Ouroboros.Consensus.Mempool.API (AddTxOnBehalfOf (..), + ForgeLedgerState (..), Mempool (..), + MempoolAddTxResult (..), MempoolSnapshot (..), SizeInBytes, + TicketNo, addLocalTxs, addTxs, isMempoolTxAdded, + isMempoolTxRejected, mempoolTxAddedToMaybe, zeroTicketNo) import Ouroboros.Consensus.Mempool.Capacity (MempoolCapacityBytesOverride (..), MempoolSize (..), computeMempoolCapacity) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/API.hs index bb4841f044..0629f175cd 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/API.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/API.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} @@ -30,6 +31,7 @@ module Ouroboros.Consensus.Mempool.API ( , zeroTicketNo ) where +import qualified Data.List.NonEmpty as NE import Ouroboros.Consensus.Block (SlotNo) import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool @@ -156,7 +158,7 @@ data Mempool m blk = Mempool { -> m (MempoolAddTxResult blk) -- | Manually remove the given transactions from the mempool. - , removeTxs :: [GenTxId blk] -> m () + , removeTxs :: NE.NonEmpty (GenTxId blk) -> m () -- | Sync the transactions in the mempool with the current ledger state -- of the 'ChainDB'. @@ -185,7 +187,21 @@ data Mempool m blk = Mempool { -- the given ledger state -- -- This does not update the state of the mempool. - , getSnapshotFor :: ForgeLedgerState blk -> STM m (MempoolSnapshot blk) + , getSnapshotFor :: + SlotNo +#if __GLASGOW_HASKELL__ >= 902 + -- ^ The current slot in which we want the snapshot +#endif + -> TickedLedgerState blk DiffMK +#if __GLASGOW_HASKELL__ >= 902 + -- ^ The ledger state ticked to the given slot number +#endif + -> (LedgerTables (LedgerState blk) KeysMK -> m (LedgerTables (LedgerState blk) ValuesMK)) +#if __GLASGOW_HASKELL__ >= 902 + -- ^ A function that returns values corresponding to the given keys for + -- the unticked ledger state. +#endif + -> m (MempoolSnapshot blk) -- | Get the mempool's capacity -- @@ -288,7 +304,7 @@ data ForgeLedgerState blk = -- This will only be the case when we realized that we are the slot leader -- and we are actually producing a block. It is the caller's responsibility -- to call 'applyChainTick' and produce the ticked ledger state. - ForgeInKnownSlot SlotNo (TickedLedgerState blk) + ForgeInKnownSlot SlotNo (TickedLedgerState blk DiffMK) -- | The slot number of the block is not yet known -- @@ -296,8 +312,7 @@ data ForgeLedgerState blk = -- will end up, we have to make an assumption about which slot number to use -- for 'applyChainTick' to prepare the ledger state; we will assume that -- they will end up in the slot after the slot at the tip of the ledger. 
- | ForgeInUnknownSlot (LedgerState blk) - + | ForgeInUnknownSlot (LedgerState blk EmptyMK) {------------------------------------------------------------------------------- Snapshot of the mempool @@ -346,6 +361,7 @@ data MempoolSnapshot blk = MempoolSnapshot { -- | The block number of the "virtual block" under construction , snapshotSlotNo :: SlotNo - -- | The ledger state after all transactions in the snapshot - , snapshotLedgerState :: TickedLedgerState blk + -- | The resulting state currently in the mempool after applying the + -- transactions + , snapshotState :: TickedLedgerState blk DiffMK } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Capacity.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Capacity.hs index 9fd85c37cd..0e08581f82 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Capacity.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Capacity.hs @@ -1,5 +1,10 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} -- | Mempool capacity, size and transaction size datatypes. -- @@ -20,6 +25,8 @@ import Data.DerivingVia (InstantiatedAt (..)) import Data.Measure (Measure) import Data.Semigroup (stimes) import Data.Word (Word32) +import GHC.Generics +import NoThunks.Class import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.SupportsMempool @@ -51,7 +58,7 @@ mkCapacityBytesOverride = MempoolCapacityBytesOverride computeMempoolCapacity :: LedgerSupportsMempool blk => LedgerConfig blk - -> TickedLedgerState blk + -> TickedLedgerState blk mk -> MempoolCapacityBytesOverride -> TxMeasure blk computeMempoolCapacity cfg st override = @@ -66,13 +73,15 @@ computeMempoolCapacity cfg st override = -- This calculation is happening at Word32. Thus overflow is silently -- accepted. Adding one less than the denominator to the numerator -- effectively rounds up instead of down. - max 1 $ (x + oneBlockBytes - 1) `div` oneBlockBytes + max 1 $ if x + oneBlockBytes < x + then x `div` oneBlockBytes + else (x + oneBlockBytes - 1) `div` oneBlockBytes SemigroupViaMeasure capacity = stimes blockCount (SemigroupViaMeasure oneBlock) newtype SemigroupViaMeasure a = SemigroupViaMeasure a - deriving (Eq, Measure) + deriving newtype (Eq, Measure) deriving Semigroup via (InstantiatedAt Measure (SemigroupViaMeasure a)) {------------------------------------------------------------------------------- @@ -85,7 +94,7 @@ data MempoolSize = MempoolSize -- ^ The number of transactions in the mempool. , msNumBytes :: !ByteSize32 -- ^ The summed byte size of all the transactions in the mempool. 
- } deriving (Eq, Show) + } deriving (Eq, Show, Generic, NoThunks) instance Semigroup MempoolSize where MempoolSize xt xb <> MempoolSize yt yb = MempoolSize (xt + yt) (xb <> yb) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs index 0e67f2e210..7053f24513 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs @@ -1,3 +1,5 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE FlexibleContexts #-} @@ -20,26 +22,25 @@ module Ouroboros.Consensus.Mempool.Impl.Common ( , LedgerInterface (..) , chainDBLedgerInterface -- * Validation - , ValidationResult (..) - , extendVRNew - , extendVRPrevApplied + , RevalidateTxsResult (..) , revalidateTxsFor - , validateStateFor + , validateNewTransaction -- * Tracing , TraceEventMempool (..) -- * Conversions - , internalStateFromVR , snapshotFromIS - , validationResultFromIS -- * Ticking a ledger state , tickLedgerState ) where import Control.Concurrent.Class.MonadMVar (MVar, newMVar) -import Control.Exception (assert) +import Control.Concurrent.Class.MonadSTM.Strict.TMVar (newTMVarIO) import Control.Monad.Trans.Except (runExcept) import Control.Tracer -import Data.Maybe (isNothing) +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable +#endif +import qualified Data.List.NonEmpty as NE import Data.Set (Set) import qualified Data.Set as Set import Data.Typeable @@ -49,13 +50,13 @@ import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended (ledgerState) import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mempool.API import Ouroboros.Consensus.Mempool.Capacity import Ouroboros.Consensus.Mempool.TxSeq (TxSeq (..), TxTicket (..)) import qualified Ouroboros.Consensus.Mempool.TxSeq as TxSeq import Ouroboros.Consensus.Storage.ChainDB (ChainDB) import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB -import Ouroboros.Consensus.Util (repeatedly) import Ouroboros.Consensus.Util.Enclose (EnclosingTimed) import Ouroboros.Consensus.Util.IOLike hiding (newMVar) @@ -90,12 +91,10 @@ data InternalState blk = IS { -- -- INVARIANT: 'isLedgerState' is the ledger resulting from applying the -- transactions in 'isTxs' against the ledger identified 'isTip' as tip. 
- , isLedgerState :: !(TickedLedgerState blk) + , isLedgerState :: !(TickedLedgerState blk DiffMK) -- | The tip of the chain that 'isTxs' was validated against - -- - -- This comes from the underlying ledger state ('tickedLedgerState') - , isTip :: !(ChainHash blk) + , isTip :: !(Point blk) -- | The most recent 'SlotNo' that 'isTxs' was validated against -- @@ -130,7 +129,7 @@ data InternalState blk = IS { deriving instance ( NoThunks (Validated (GenTx blk)) , NoThunks (GenTxId blk) - , NoThunks (Ticked (LedgerState blk)) + , NoThunks (TickedLedgerState blk DiffMK) , NoThunks (TxMeasure blk) , StandardHash blk , Typeable blk @@ -150,13 +149,13 @@ initInternalState :: -> TicketNo -- ^ Used for 'isLastTicketNo' -> LedgerConfig blk -> SlotNo - -> TickedLedgerState blk + -> TickedLedgerState blk DiffMK -> InternalState blk initInternalState capacityOverride lastTicketNo cfg slot st = IS { isTxs = TxSeq.Empty , isTxIds = Set.empty , isLedgerState = st - , isTip = castHash (getTipHash st) + , isTip = castPoint $ getTip st , isSlotNo = slot , isLastTicketNo = lastTicketNo , isCapacity = computeMempoolCapacity cfg st capacityOverride @@ -168,15 +167,30 @@ initInternalState capacityOverride lastTicketNo cfg slot st = IS { -- | Abstract interface needed to run a Mempool. data LedgerInterface m blk = LedgerInterface - { getCurrentLedgerState :: STM m (LedgerState blk) + { -- | Get the current tip of the LedgerDB. + getCurrentLedgerState :: STM m (LedgerState blk EmptyMK) + -- | Get values at the given point on the chain. Returns Nothing if the + -- anchor moved or if the state is not found on the ledger db. + , getLedgerTablesAtFor + :: Point blk + -> [GenTx blk] + -> m (Maybe (LedgerTables (LedgerState blk) ValuesMK)) } -- | Create a 'LedgerInterface' from a 'ChainDB'. chainDBLedgerInterface :: - (IOLike m, IsLedger (LedgerState blk)) + ( IOLike m + , LedgerSupportsMempool blk + ) => ChainDB m blk -> LedgerInterface m blk chainDBLedgerInterface chainDB = LedgerInterface - { getCurrentLedgerState = ledgerState <$> ChainDB.getCurrentLedger chainDB + { getCurrentLedgerState = + ledgerState <$> ChainDB.getCurrentLedger chainDB + , getLedgerTablesAtFor = \pt txs -> do + let keys = castLedgerTables + $ foldl' (<>) emptyLedgerTables + $ map getTransactionKeySets txs + fmap castLedgerTables <$> ChainDB.getLedgerTablesAtFor chainDB pt keys } {------------------------------------------------------------------------------- @@ -189,7 +203,7 @@ chainDBLedgerInterface chainDB = LedgerInterface data MempoolEnv m blk = MempoolEnv { mpEnvLedger :: LedgerInterface m blk , mpEnvLedgerCfg :: LedgerConfig blk - , mpEnvStateVar :: StrictTVar m (InternalState blk) + , mpEnvStateVar :: StrictTMVar m (InternalState blk) , mpEnvAddTxsRemoteFifo :: MVar m () , mpEnvAddTxsAllFifo :: MVar m () , mpEnvTracer :: Tracer m (TraceEventMempool blk) @@ -197,7 +211,6 @@ data MempoolEnv m blk = MempoolEnv { } initMempoolEnv :: ( IOLike m - , NoThunks (GenTxId blk) , LedgerSupportsMempool blk , ValidateEnvelope blk ) @@ -210,7 +223,7 @@ initMempoolEnv ledgerInterface cfg capacityOverride tracer = do st <- atomically $ getCurrentLedgerState ledgerInterface let (slot, st') = tickLedgerState cfg (ForgeInUnknownSlot st) isVar <- - newTVarIO + newTMVarIO $ initInternalState capacityOverride TxSeq.zeroTicketNo cfg slot st' addTxRemoteFifo <- newMVar () addTxAllFifo <- newMVar () @@ -233,7 +246,7 @@ tickLedgerState :: forall blk. 
(UpdateLedger blk, ValidateEnvelope blk) => LedgerConfig blk -> ForgeLedgerState blk - -> (SlotNo, TickedLedgerState blk) + -> (SlotNo, TickedLedgerState blk DiffMK) tickLedgerState _cfg (ForgeInKnownSlot slot st) = (slot, st) tickLedgerState cfg (ForgeInUnknownSlot st) = (slot, applyChainTick cfg slot st) @@ -253,167 +266,104 @@ tickLedgerState cfg (ForgeInUnknownSlot st) = Validation -------------------------------------------------------------------------------} -data ValidationResult invalidTx blk = ValidationResult { - -- | The tip of the chain before applying these transactions - vrBeforeTip :: ChainHash blk - - -- | The slot number of the (imaginary) block the txs will be placed in - , vrSlotNo :: SlotNo - - -- | Capacity of the Mempool. Corresponds to 'vrBeforeTip' and - -- 'vrBeforeSlotNo', /not/ 'vrAfter'. - , vrBeforeCapacity :: TxMeasure blk - - -- | The transactions that were found to be valid (oldest to newest) - , vrValid :: TxSeq (TxMeasure blk) (Validated (GenTx blk)) - - -- | The cached IDs of transactions that were found to be valid (oldest to - -- newest) - , vrValidTxIds :: Set (GenTxId blk) - - -- | A new transaction (not previously known) which was found to be valid. - -- - -- n.b. This will only contain a valid transaction that was /newly/ added - -- to the mempool (not a previously known valid transaction). - , vrNewValid :: Maybe (Validated (GenTx blk)) - - -- | The state of the ledger after applying 'vrValid' against the ledger - -- state identifeid by 'vrBeforeTip'. - , vrAfter :: TickedLedgerState blk - - -- | The transactions that were invalid, along with their errors - -- - -- From oldest to newest. - , vrInvalid :: [(invalidTx, ApplyTxErr blk)] - - -- | The mempool 'TicketNo' counter. - -- - -- When validating new transactions, this should be incremented, starting - -- from 'isLastTicketNo' of the 'InternalState'. - -- When validating previously applied transactions, this field should not - -- be affected. - , vrLastTicketNo :: TicketNo - } - --- | Extend 'ValidationResult' with a previously validated transaction that --- may or may not be valid in this ledger state --- --- n.b. Even previously validated transactions may not be valid in a different --- ledger state; it is /still/ useful to indicate whether we have previously --- validated this transaction because, if we have, we can utilize 'reapplyTx' --- rather than 'applyTx' and, therefore, skip things like cryptographic --- signatures. -extendVRPrevApplied :: (LedgerSupportsMempool blk, HasTxId (GenTx blk)) - => LedgerConfig blk - -> TxTicket (TxMeasure blk) (Validated (GenTx blk)) - -> ValidationResult (Validated (GenTx blk)) blk - -> ValidationResult (Validated (GenTx blk)) blk -extendVRPrevApplied cfg txTicket vr = - case runExcept (reapplyTx cfg vrSlotNo tx vrAfter) of - Left err -> vr { vrInvalid = (tx, err) : vrInvalid - } - Right st' -> vr { vrValid = vrValid :> txTicket - , vrValidTxIds = Set.insert (txId (txForgetValidated tx)) vrValidTxIds - , vrAfter = st' - } - where - TxTicket { txTicketTx = tx } = txTicket - ValidationResult { vrValid, vrSlotNo, vrValidTxIds, vrAfter, vrInvalid } = vr - --- | Extend 'ValidationResult' with a new transaction (one which we have not +-- | Extend 'InternalState' with a new transaction (one which we have not -- previously validated) that may or may not be valid in this ledger state. --- --- PRECONDITION: 'vrNewValid' is 'Nothing'. In other words: new transactions --- should be validated one-by-one, not by calling 'extendVRNew' on its result --- again. 
-extendVRNew :: (LedgerSupportsMempool blk, HasTxId (GenTx blk)) - => LedgerConfig blk - -> WhetherToIntervene - -> GenTx blk - -> ValidationResult (GenTx blk) blk - -> Either - (ApplyTxErr blk) - ( Validated (GenTx blk) - , ValidationResult (GenTx blk) blk - ) -extendVRNew cfg wti tx vr = - assert (isNothing vrNewValid) $ runExcept m - where - ValidationResult { - vrValid - , vrValidTxIds - , vrAfter - , vrLastTicketNo - , vrNewValid - , vrSlotNo - } = vr - - m = do - txsz <- txMeasure cfg vrAfter tx - (st', vtx) <- applyTx cfg wti vrSlotNo tx vrAfter - let nextTicketNo = succ vrLastTicketNo - pure - ( vtx - , vr { vrValid = vrValid :> TxTicket vtx nextTicketNo txsz - , vrValidTxIds = Set.insert (txId tx) vrValidTxIds - , vrNewValid = Just vtx - , vrAfter = st' - , vrLastTicketNo = nextTicketNo +validateNewTransaction + :: (LedgerSupportsMempool blk, HasTxId (GenTx blk)) + => LedgerConfig blk + -> WhetherToIntervene + -> GenTx blk + -> TxMeasure blk + -> TickedLedgerState blk ValuesMK + -> InternalState blk + -> ( Either (ApplyTxErr blk) (Validated (GenTx blk)) + , InternalState blk + ) +validateNewTransaction cfg wti tx txsz st is = + case runExcept (applyTx cfg wti isSlotNo tx st) of + Left err -> ( Left err, is ) + Right (st', vtx) -> + ( Right vtx + , is { isTxs = isTxs :> TxTicket vtx nextTicketNo txsz + , isTxIds = Set.insert (txId tx) isTxIds + , isLedgerState = prependDiffs isLedgerState st' + , isLastTicketNo = nextTicketNo } ) - -{------------------------------------------------------------------------------- - Conversions --------------------------------------------------------------------------------} - --- | Construct internal state from 'ValidationResult' --- --- Discards information about invalid and newly valid transactions -internalStateFromVR :: ValidationResult invalidTx blk -> InternalState blk -internalStateFromVR vr = IS { - isTxs = vrValid - , isTxIds = vrValidTxIds - , isLedgerState = vrAfter - , isTip = vrBeforeTip - , isSlotNo = vrSlotNo - , isLastTicketNo = vrLastTicketNo - , isCapacity = vrBeforeCapacity - } - where - ValidationResult { - vrBeforeTip - , vrSlotNo - , vrBeforeCapacity - , vrValid - , vrValidTxIds - , vrAfter - , vrLastTicketNo - } = vr - --- | Construct a 'ValidationResult' from internal state. -validationResultFromIS :: InternalState blk -> ValidationResult invalidTx blk -validationResultFromIS is = ValidationResult { - vrBeforeTip = isTip - , vrSlotNo = isSlotNo - , vrBeforeCapacity = isCapacity - , vrValid = isTxs - , vrValidTxIds = isTxIds - , vrNewValid = Nothing - , vrAfter = isLedgerState - , vrInvalid = [] - , vrLastTicketNo = isLastTicketNo - } where IS { isTxs , isTxIds , isLedgerState - , isTip - , isSlotNo , isLastTicketNo - , isCapacity + , isSlotNo } = is + nextTicketNo = succ isLastTicketNo + +-- | Revalidate the given transactions against the given ticked ledger state, +-- producing a new 'InternalState'. +-- +-- Note that this function will perform revalidation so it is expected that the +-- transactions given to it were previously applied, for example if we are +-- revalidating the whole set of transactions onto a new state, or if we remove +-- some transactions and revalidate the remaining ones. 
+revalidateTxsFor + :: (LedgerSupportsMempool blk, HasTxId (GenTx blk)) + => MempoolCapacityBytesOverride + -> LedgerConfig blk + -> SlotNo + -> TickedLedgerState blk DiffMK + -- ^ The ticked ledger state against which txs will be revalidated + -> LedgerTables (LedgerState blk) ValuesMK + -- ^ The tables with all the inputs for the transactions + -> TicketNo -- ^ 'isLastTicketNo' & 'vrLastTicketNo' + -> [TxTicket (TxMeasure blk) (Validated (GenTx blk))] + -> RevalidateTxsResult blk +revalidateTxsFor capacityOverride cfg slot st values lastTicketNo txTickets = + let theTxs = map txTicketTx txTickets + ReapplyTxsResult err val st' = + reapplyTxs cfg slot theTxs + $ applyDiffForKeysOnTables + values + (foldl (<>) emptyLedgerTables $ map (getTransactionKeySets . txForgetValidated) theTxs) + st + + -- TODO: This is ugly, but I couldn't find a way to sneak the 'TxTicket' into + -- 'reapplyTxs'. + filterTxTickets _ [] = [] + filterTxTickets (t1 : t1s) t2ss@(t2 : t2s) + | txId (txForgetValidated $ txTicketTx t1) == txId (txForgetValidated t2) + = t1 : filterTxTickets t1s t2s + | otherwise + = filterTxTickets t1s t2ss + filterTxTickets [] _ = + error "There are fewer transactions given to the revalidate function than transactions revalidated! This is unacceptable (and impossible)!" + + in RevalidateTxsResult + (IS { + isTxs = foldl (:>) TxSeq.Empty $ filterTxTickets txTickets val + , isTxIds = Set.fromList $ map (txId . txForgetValidated) val + , isLedgerState = st' + , isTip = castPoint $ getTip st + , isSlotNo = slot + , isLastTicketNo = lastTicketNo + , isCapacity = computeMempoolCapacity cfg st capacityOverride + }) + err + +data RevalidateTxsResult blk = + RevalidateTxsResult { + -- | The internal state after revalidation + newInternalState :: !(InternalState blk) + -- | The previously valid transactions that are now invalid + , removedTxs :: ![Invalidated blk] + } + +{------------------------------------------------------------------------------- + Conversions +-------------------------------------------------------------------------------} + -- | Create a Mempool Snapshot from a given Internal State of the mempool. snapshotFromIS :: forall blk. (HasTxId (GenTx blk), TxLimits blk) @@ -426,7 +376,7 @@ snapshotFromIS is = MempoolSnapshot { , snapshotHasTx = implSnapshotHasTx is , snapshotMempoolSize = implSnapshotGetMempoolSize is , snapshotSlotNo = isSlotNo is - , snapshotLedgerState = isLedgerState is + , snapshotState = isLedgerState is , snapshotTake = implSnapshotTake is } where @@ -460,63 +410,6 @@ snapshotFromIS is = MempoolSnapshot { -> MempoolSize implSnapshotGetMempoolSize = isMempoolSize -{------------------------------------------------------------------------------- - Validating txs or states --------------------------------------------------------------------------------} - --- | Given a (valid) internal state, validate it against the given ledger --- state and 'BlockSlot'. --- --- When these match the internal state's 'isTip' and 'isSlotNo', this is very --- cheap, as the given internal state will already be valid against the given --- inputs. --- --- When these don't match, the transaction in the internal state will be --- revalidated ('revalidateTxsFor').
-validateStateFor :: - (LedgerSupportsMempool blk, HasTxId (GenTx blk), ValidateEnvelope blk) - => MempoolCapacityBytesOverride - -> LedgerConfig blk - -> ForgeLedgerState blk - -> InternalState blk - -> ValidationResult (Validated (GenTx blk)) blk -validateStateFor capacityOverride cfg blockLedgerState is - | isTip == castHash (getTipHash st') - , isSlotNo == slot - = validationResultFromIS is - | otherwise - = revalidateTxsFor - capacityOverride - cfg - slot - st' - isLastTicketNo - (TxSeq.toList isTxs) - where - IS { isTxs, isTip, isSlotNo, isLastTicketNo } = is - (slot, st') = tickLedgerState cfg blockLedgerState - --- | Revalidate the given transactions (@['TxTicket' ('GenTx' blk)]@), which --- are /all/ the transactions in the Mempool against the given ticked ledger --- state, which corresponds to the chain's ledger state. -revalidateTxsFor :: - (LedgerSupportsMempool blk, HasTxId (GenTx blk)) - => MempoolCapacityBytesOverride - -> LedgerConfig blk - -> SlotNo - -> TickedLedgerState blk - -> TicketNo - -- ^ 'isLastTicketNo' & 'vrLastTicketNo' - -> [TxTicket (TxMeasure blk) (Validated (GenTx blk))] - -> ValidationResult (Validated (GenTx blk)) blk -revalidateTxsFor capacityOverride cfg slot st lastTicketNo txTickets = - repeatedly - (extendVRPrevApplied cfg) - txTickets - (validationResultFromIS is) - where - is = initInternalState capacityOverride lastTicketNo cfg slot st - {------------------------------------------------------------------------------- Tracing support for the mempool operations -------------------------------------------------------------------------------} @@ -546,7 +439,7 @@ data TraceEventMempool blk MempoolSize -- ^ The current size of the Mempool. | TraceMempoolManuallyRemovedTxs - [GenTxId blk] + (NE.NonEmpty (GenTxId blk)) -- ^ Transactions that have been manually removed from the Mempool. [Validated (GenTx blk)] -- ^ Previously valid transactions that are no longer valid because they @@ -561,15 +454,24 @@ data TraceEventMempool blk -- ^ Emitted when the mempool is adjusted after the tip has changed. EnclosingTimed -- ^ How long the sync operation took. 
+ | TraceMempoolAttemptingSync + | TraceMempoolSyncNotNeeded (Point blk) (Point blk) + | TraceMempoolSyncDone + | TraceMempoolAttemptingAdd (GenTx blk) + | TraceMempoolLedgerFound (Point blk) + | TraceMempoolLedgerNotFound (Point blk) + deriving (Generic) deriving instance ( Eq (GenTx blk) , Eq (Validated (GenTx blk)) , Eq (GenTxId blk) , Eq (ApplyTxErr blk) + , StandardHash blk ) => Eq (TraceEventMempool blk) deriving instance ( Show (GenTx blk) , Show (Validated (GenTx blk)) , Show (GenTxId blk) , Show (ApplyTxErr blk) + , StandardHash blk ) => Show (TraceEventMempool blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Init.hs index 31cec4c6b5..fdb4a4999d 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Init.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Init.hs @@ -15,14 +15,14 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool -import Ouroboros.Consensus.Mempool.API +import Ouroboros.Consensus.Mempool.API (Mempool (..)) import Ouroboros.Consensus.Mempool.Capacity import Ouroboros.Consensus.Mempool.Impl.Common import Ouroboros.Consensus.Mempool.Query import Ouroboros.Consensus.Mempool.Update import Ouroboros.Consensus.Util.Enclose import Ouroboros.Consensus.Util.IOLike -import Ouroboros.Consensus.Util.STM (Watcher (..), forkLinkedWatcher) +import Ouroboros.Consensus.Util.STM {------------------------------------------------------------------------------- Opening the mempool @@ -106,17 +106,14 @@ mkMempool :: ) => MempoolEnv m blk -> Mempool m blk mkMempool mpEnv = Mempool - { addTx = implAddTx istate remoteFifo allFifo cfg trcr + { addTx = implAddTx mpEnv , removeTxs = implRemoveTxs mpEnv , syncWithLedger = implSyncWithLedger mpEnv - , getSnapshot = snapshotFromIS <$> readTVar istate - , getSnapshotFor = \fls -> pureGetSnapshotFor cfg fls co <$> readTVar istate - , getCapacity = isCapacity <$> readTVar istate + , getSnapshot = snapshotFromIS <$> readTMVar istate + , getSnapshotFor = implGetSnapshotFor mpEnv + , getCapacity = isCapacity <$> readTMVar istate } - where MempoolEnv { mpEnvStateVar = istate - , mpEnvAddTxsRemoteFifo = remoteFifo - , mpEnvAddTxsAllFifo = allFifo - , mpEnvLedgerCfg = cfg - , mpEnvTracer = trcr - , mpEnvCapacityOverride = co - } = mpEnv + where + MempoolEnv { + mpEnvStateVar = istate + } = mpEnv diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs index 74a9472169..e2c7e41d6b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs @@ -1,29 +1,89 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE FlexibleContexts #-} -- | Queries to the mempool -module Ouroboros.Consensus.Mempool.Query (pureGetSnapshotFor) where +module Ouroboros.Consensus.Mempool.Query ( + implGetSnapshotFor + , pureGetSnapshotFor + ) where -import Ouroboros.Consensus.HeaderValidation +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable (foldl') +#endif +import Ouroboros.Consensus.Block.Abstract import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils (emptyLedgerTables) 
import Ouroboros.Consensus.Mempool.API import Ouroboros.Consensus.Mempool.Capacity import Ouroboros.Consensus.Mempool.Impl.Common +import qualified Ouroboros.Consensus.Mempool.TxSeq as TxSeq +import Ouroboros.Consensus.Util.IOLike + +implGetSnapshotFor :: + ( IOLike m + , LedgerSupportsMempool blk + , HasTxId (GenTx blk) + ) + => MempoolEnv m blk + -> SlotNo -- ^ Get snapshot for this slot number (usually the current slot) + -> TickedLedgerState blk DiffMK -- ^ The ledger state at 'pt' ticked to 'slot' + -> (LedgerTables (LedgerState blk) KeysMK -> m (LedgerTables (LedgerState blk) ValuesMK)) + -- ^ A function that returns values corresponding to the given keys for + -- the unticked ledger state at 'pt'. + -> m (MempoolSnapshot blk) +implGetSnapshotFor mpEnv slot ticked readUntickedTables = do + is <- atomically $ readTMVar istate + if pointHash (isTip is) == castHash (getTipHash ticked) && + isSlotNo is == slot + then + -- We are looking for a snapshot of exactly the ledger state we already + -- have cached, so we can just return it. + pure . snapshotFromIS $ is + else do + let keys = foldl' (<>) emptyLedgerTables + $ map getTransactionKeySets + $ [ txForgetValidated . TxSeq.txTicketTx $ tx + | tx <- TxSeq.toList $ isTxs is + ] + values <- readUntickedTables keys + pure $ getSnap is values + where + getSnap is tbs = pureGetSnapshotFor + capacityOverride + cfg + tbs + is + (ForgeInKnownSlot slot ticked) + MempoolEnv { mpEnvStateVar = istate + , mpEnvLedgerCfg = cfg + , mpEnvCapacityOverride = capacityOverride + } = mpEnv -- | Get a snapshot of the mempool state that is valid with respect to --- the given ledger state +-- the given ledger state, together with the ticked ledger state. pureGetSnapshotFor :: ( LedgerSupportsMempool blk , HasTxId (GenTx blk) - , ValidateEnvelope blk ) - => LedgerConfig blk - -> ForgeLedgerState blk - -> MempoolCapacityBytesOverride + => MempoolCapacityBytesOverride + -> LedgerConfig blk + -> LedgerTables (LedgerState blk) ValuesMK -> InternalState blk + -> ForgeLedgerState blk -> MempoolSnapshot blk -pureGetSnapshotFor cfg blockLedgerState capacityOverride = - snapshotFromIS - . internalStateFromVR - .
validateStateFor capacityOverride cfg blockLedgerState - +pureGetSnapshotFor _ _ _ _ ForgeInUnknownSlot{} = + error "Tried to get a snapshot for unknown slot" +pureGetSnapshotFor capacityOverride cfg values is (ForgeInKnownSlot slot st) = + snapshotFromIS $ + if pointHash (isTip is) == castHash (getTipHash st) && isSlotNo is == slot + then is + else newInternalState + $ revalidateTxsFor + capacityOverride + cfg + slot + st + values + (isLastTicketNo is) + (TxSeq.toList $ isTxs is) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Update.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Update.hs index 372ea15c29..40e0bd8127 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Update.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Update.hs @@ -8,51 +8,46 @@ module Ouroboros.Consensus.Mempool.Update ( , implSyncWithLedger ) where -import Control.Concurrent.Class.MonadMVar (MVar, withMVar) -import Control.Exception (assert) +import Cardano.Slotting.Slot +import Control.Concurrent.Class.MonadMVar (withMVar) +import Control.Monad (void) import Control.Monad.Except (runExcept) import Control.Tracer -import Data.Maybe (isJust) +import qualified Data.List.NonEmpty as NE +import Data.Maybe (fromMaybe) import qualified Data.Measure as Measure import qualified Data.Set as Set import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mempool.API import Ouroboros.Consensus.Mempool.Capacity import Ouroboros.Consensus.Mempool.Impl.Common import Ouroboros.Consensus.Mempool.TxSeq (TxTicket (..)) import qualified Ouroboros.Consensus.Mempool.TxSeq as TxSeq -import Ouroboros.Consensus.Util (whenJust) +import Ouroboros.Consensus.Util (whenJust, withTMVarAnd) import Ouroboros.Consensus.Util.IOLike hiding (withMVar) +import Ouroboros.Network.Block {------------------------------------------------------------------------------- Add transactions -------------------------------------------------------------------------------} -- | Add a single transaction to the mempool, blocking if there is no space. --- implAddTx :: - ( MonadSTM m - , MonadMVar m + ( IOLike m , LedgerSupportsMempool blk + , ValidateEnvelope blk , HasTxId (GenTx blk) ) - => StrictTVar m (InternalState blk) - -- ^ The InternalState TVar. - -> MVar m () - -- ^ The FIFO for remote peers - -> MVar m () - -- ^ The FIFO for all remote peers and local clients - -> LedgerConfig blk - -- ^ The configuration of the ledger. - -> Tracer m (TraceEventMempool blk) + => MempoolEnv m blk -> AddTxOnBehalfOf -- ^ Whether we're acting on behalf of a remote peer or a local client. -> GenTx blk -- ^ The transaction to add to the mempool. -> m (MempoolAddTxResult blk) -implAddTx istate remoteFifo allFifo cfg trcr onbehalf tx = +implAddTx mpEnv onbehalf tx = -- To ensure fair behaviour between threads that are trying to add -- transactions, we make them all queue in a fifo. Only the one at the head -- of the queue gets to actually wait for space to get freed up in the @@ -72,7 +67,7 @@ implAddTx istate remoteFifo allFifo cfg trcr onbehalf tx = case onbehalf of AddTxForRemotePeer -> withMVar remoteFifo $ \() -> - withMVar allFifo $ \() -> + withMVar allFifo $ \() -> -- This action can also block. Holding the MVars means -- there is only a single such thread blocking at once. 
implAddTx' @@ -84,17 +79,18 @@ implAddTx istate remoteFifo allFifo cfg trcr onbehalf tx = -- threads waiting. implAddTx' where - implAddTx' = do - (result, ev) <- atomically $ do - outcome <- implTryAddTx istate cfg - (whetherToIntervene onbehalf) - tx - case outcome of - TryAddTx _ result ev -> do return (result, ev) - - -- or block until space is available to fit the next transaction - NotEnoughSpaceLeft -> retry + MempoolEnv { + mpEnvAddTxsRemoteFifo = remoteFifo + , mpEnvAddTxsAllFifo = allFifo + , mpEnvTracer = trcr + } = mpEnv + implAddTx' = do + TransactionProcessingResult _ result ev <- + doAddTx + mpEnv + (whetherToIntervene onbehalf) + tx traceWith trcr ev return result @@ -102,21 +98,25 @@ implAddTx istate remoteFifo allFifo cfg trcr onbehalf tx = whetherToIntervene AddTxForRemotePeer = DoNotIntervene whetherToIntervene AddTxForLocalClient = Intervene --- | Result of trying to add a transaction to the mempool. -data TryAddTx blk = +-- | Tried to add a transaction, was it processed or is there no space left? +data TriedToAddTx blk = -- | Adding the next transaction would put the mempool over capacity. NotEnoughSpaceLeft - -- | A transaction was processed. - | TryAddTx - (Maybe (InternalState blk)) - -- ^ If the transaction was accepted, the new state that can be written to - -- the TVar. - (MempoolAddTxResult blk) - -- ^ The result of trying to add the transaction to the mempool. - (TraceEventMempool blk) - -- ^ The event emitted by the operation. + | Processed (TransactionProcessed blk) + +-- | A transaction was processed, either accepted or rejected. +data TransactionProcessed blk = + TransactionProcessingResult + (Maybe (InternalState blk)) + -- ^ If the transaction was accepted, the new state that can be written to + -- the TVar. + (MempoolAddTxResult blk) + -- ^ The result of trying to add the transaction to the mempool. + (TraceEventMempool blk) + -- ^ The event emitted by the operation. --- | Add a single transaction by interpreting a 'TryAddTx' from 'pureTryAddTx'. +-- | This function returns whether the transaction was added or rejected, and +-- will block if the mempool is full. -- -- This function returns whether the transaction was added or rejected, or if -- the Mempool capacity is reached. See 'implAddTx' for a function that blocks @@ -128,34 +128,74 @@ data TryAddTx blk = -- See the necessary invariants on the Haddock for 'API.addTxs'. -- -- This function does not sync the Mempool contents with the ledger state in --- case the latter changes, it relies on the background thread to do that. +-- case the latter changes in a way that doesn't invalidate the db changelog, it +-- relies on the background thread to do that. If the db changelog is +-- invalidated (by rolling back the last synced ledger state), it will sync +-- in-place. -- -- INVARIANT: The code needs that read and writes on the state are coupled --- together or inconsistencies will arise. To ensure that STM transactions are --- short, each iteration of the helper function is a separate STM transaction. -implTryAddTx :: - ( MonadSTM m - , LedgerSupportsMempool blk +-- together or inconsistencies will arise. +doAddTx :: + ( LedgerSupportsMempool blk , HasTxId (GenTx blk) + , ValidateEnvelope blk + , IOLike m ) - => StrictTVar m (InternalState blk) - -- ^ The InternalState TVar. - -> LedgerConfig blk - -- ^ The configuration of the ledger. + => MempoolEnv m blk -> WhetherToIntervene -> GenTx blk -- ^ The transaction to add to the mempool. 
- -> STM m (TryAddTx blk) -implTryAddTx istate cfg wti tx = do - is <- readTVar istate - let outcome = pureTryAddTx cfg wti tx is - case outcome of - TryAddTx (Just is') _ _ -> writeTVar istate is' - TryAddTx Nothing _ _ -> return () - NotEnoughSpaceLeft -> return () - return outcome + -> m (TransactionProcessed blk) +doAddTx mpEnv wti tx = + doAddTx' Nothing + where + MempoolEnv { + mpEnvLedger = ldgrInterface + , mpEnvLedgerCfg = cfg + , mpEnvStateVar = istate + , mpEnvTracer = trcr + } = mpEnv --- | See the documentation of 'implTryAddTx' for some more context. + doAddTx' s = do + traceWith trcr $ TraceMempoolAttemptingAdd tx + res <- withTMVarAnd istate (\is -> + case s of + Nothing -> pure () + Just s' -> check $ isMempoolSize is /= s') + $ \is () -> do + mTbs <- getLedgerTablesAtFor ldgrInterface (isTip is) [tx] + case mTbs of + Just tbs -> do + traceWith trcr $ TraceMempoolLedgerFound (isTip is) + case pureTryAddTx cfg wti tx is tbs of + NotEnoughSpaceLeft -> do + pure (Retry (isMempoolSize is), is) + Processed outcome@(TransactionProcessingResult is' _ _) -> do + pure (OK outcome, fromMaybe is is') + Nothing -> do + traceWith trcr $ TraceMempoolLedgerNotFound (isTip is) + -- We couldn't retrieve the values because the state is no longer in + -- the db. We need to resync. + pure (Resync, is) + case res of + Retry s' -> doAddTx' (Just s') + OK outcome -> pure outcome + Resync -> do + void $ implSyncWithLedger mpEnv + doAddTx' s + +data WithTMVarOutcome retry ok = + Retry retry + | OK ok + | Resync + +-- | Craft a 'TriedToAddTx' value containing the resulting state if +-- applicable, the tracing event and the result of adding this transaction. See +-- the documentation of 'implAddTx' for some more context. +-- +-- It returns 'NotEnoughSpaceLeft' only when the current mempool size is greater +-- than or equal to the mempool capacity. Otherwise it will validate the transaction +-- and add it to the mempool if there is at least one byte free in the mempool. pureTryAddTx :: ( LedgerSupportsMempool blk , HasTxId (GenTx blk) @@ -167,9 +207,11 @@ pureTryAddTx :: -- ^ The transaction to add to the mempool. -> InternalState blk -- ^ The current internal state of the mempool. - -> TryAddTx blk -pureTryAddTx cfg wti tx is = - case runExcept $ txMeasure cfg (isLedgerState is) tx of + -> LedgerTables (LedgerState blk) ValuesMK + -> TriedToAddTx blk +pureTryAddTx cfg wti tx is values = + let st = applyDiffForKeysOnTables values (getTransactionKeySets tx) (isLedgerState is) in + case runExcept $ txMeasure cfg st tx of Left err -> -- The transaction does not have a valid measure (eg its ExUnits is -- greater than what this ledger state allows for a single transaction). @@ -181,7 +223,7 @@ pureTryAddTx cfg wti tx is = -- selection changed, even if the tx wouldn't fit. So it'd very much be -- as if the mempool were effectively over capacity! What's worse, each -- attempt would not be using 'extendVRPrevApplied'.
- TryAddTx + Processed $ TransactionProcessingResult Nothing (MempoolTxRejected tx err) (TraceMempoolRejectedTx @@ -244,9 +286,9 @@ pureTryAddTx cfg wti tx is = NotEnoughSpaceLeft | otherwise -> - case extendVRNew cfg wti tx $ validationResultFromIS is of - Left err -> - TryAddTx + case validateNewTransaction cfg wti tx txsz st is of + (Left err, _) -> + Processed $ TransactionProcessingResult Nothing (MempoolTxRejected tx err) (TraceMempoolRejectedTx @@ -254,11 +296,8 @@ pureTryAddTx cfg wti tx is = err (isMempoolSize is) ) - Right (vtx, vr) -> - let is' = internalStateFromVR vr - in - assert (isJust (vrNewValid vr)) $ - TryAddTx + (Right vtx, is') -> + Processed $ TransactionProcessingResult (Just is') (MempoolTxAdded vtx) (TraceMempoolAddedTx @@ -273,12 +312,6 @@ pureTryAddTx cfg wti tx is = Remove transactions -------------------------------------------------------------------------------} --- | A datatype containing the state resulting after removing the requested --- transactions from the mempool and maybe a message to be traced while removing --- them. -data RemoveTxs blk = - WriteRemoveTxs (InternalState blk) (Maybe (TraceEventMempool blk)) - -- | See 'Ouroboros.Consensus.Mempool.API.removeTxs'. implRemoveTxs :: ( IOLike m @@ -286,133 +319,166 @@ implRemoveTxs :: , HasTxId (GenTx blk) , ValidateEnvelope blk ) - => MempoolEnv m blk - -> [GenTxId blk] - -> m () -implRemoveTxs menv txs - | null txs = pure () - | otherwise = do - tr <- atomically $ do - is <- readTVar istate - ls <- getCurrentLedgerState ldgrInterface - let WriteRemoveTxs is' t = pureRemoveTxs cfg co txs is ls - writeTVar istate is' - pure t - whenJust tr (traceWith trcr) + => MempoolEnv m blk + -> NE.NonEmpty (GenTxId blk) + -> m () +implRemoveTxs mpEnv toRemove = do + out <- withTMVarAnd istate (const $ getCurrentLedgerState ldgrInterface) + $ \is ls -> do + let toKeep = filter + ( (`notElem` Set.fromList (NE.toList toRemove)) + . txId + . txForgetValidated + . txTicketTx + ) + (TxSeq.toList $ isTxs is) + (slot, ticked) = tickLedgerState cfg (ForgeInUnknownSlot ls) + toKeep' = [ txForgetValidated . TxSeq.txTicketTx $ tx | tx <- toKeep ] + mTbs <- getLedgerTablesAtFor ldgrInterface (castPoint (getTip ls)) toKeep' + case mTbs of + Nothing -> pure (Resync, is) + Just tbs -> do + let (is', t) = pureRemoveTxs + capacityOverride + cfg + slot + ticked + tbs + (isLastTicketNo is) + toKeep + toRemove + traceWith trcr t + pure (OK (), is') + case out of + Resync -> do + void $ implSyncWithLedger mpEnv + implRemoveTxs mpEnv toRemove + OK () -> pure () + Retry _ -> error "Impossible!" where - MempoolEnv { mpEnvStateVar = istate - , mpEnvLedger = ldgrInterface - , mpEnvTracer = trcr - , mpEnvLedgerCfg = cfg - , mpEnvCapacityOverride = co - } = menv + MempoolEnv { mpEnvStateVar = istate + , mpEnvLedger = ldgrInterface + , mpEnvTracer = trcr + , mpEnvLedgerCfg = cfg + , mpEnvCapacityOverride = capacityOverride + } = mpEnv -- | Craft a 'RemoveTxs' that manually removes the given transactions from the -- mempool, returning inside it an updated InternalState. pureRemoveTxs :: ( LedgerSupportsMempool blk , HasTxId (GenTx blk) - , ValidateEnvelope blk ) - => LedgerConfig blk - -> MempoolCapacityBytesOverride - -> [GenTxId blk] - -> InternalState blk - -> LedgerState blk - -> RemoveTxs blk -pureRemoveTxs cfg capacityOverride txIds is lstate = - -- Filtering is O(n), but this function will rarely be used, as it is an - -- escape hatch when there's an inconsistency between the ledger and the - -- mempool. 
- let toRemove = Set.fromList txIds - txTickets' = filter - ( (`notElem` toRemove) - . txId - . txForgetValidated - . txTicketTx - ) - (TxSeq.toList (isTxs is)) - (slot, ticked) = tickLedgerState cfg (ForgeInUnknownSlot lstate) - vr = revalidateTxsFor - capacityOverride - cfg - slot - ticked - (isLastTicketNo is) - txTickets' - is' = internalStateFromVR vr - needsTrace = if null txIds - then - Nothing - else - Just $ TraceMempoolManuallyRemovedTxs - txIds - (map fst (vrInvalid vr)) - (isMempoolSize is') - in WriteRemoveTxs is' needsTrace + => MempoolCapacityBytesOverride + -> LedgerConfig blk + -> SlotNo + -> TickedLedgerState blk DiffMK + -> LedgerTables (LedgerState blk) ValuesMK + -> TicketNo + -> [TxTicket (TxMeasure blk) (Validated (GenTx blk))] -- ^ Txs to keep + -> NE.NonEmpty (GenTxId blk) -- ^ IDs to remove + -> (InternalState blk, TraceEventMempool blk) +pureRemoveTxs capacityOverride lcfg slot lstate values tkt txs txIds = + let RevalidateTxsResult is' removed = + revalidateTxsFor + capacityOverride + lcfg + slot + lstate + values + tkt + txs + trace = TraceMempoolManuallyRemovedTxs + txIds + (map getInvalidated removed) + (isMempoolSize is') + in (is', trace) {------------------------------------------------------------------------------- Sync with ledger -------------------------------------------------------------------------------} --- | A datatype containing the new state produced by syncing with the Ledger, a --- snapshot of that mempool state and, if needed, a tracing message. -data SyncWithLedger blk = - NewSyncedState (InternalState blk) - (MempoolSnapshot blk) - (Maybe (TraceEventMempool blk)) - -- | See 'Ouroboros.Consensus.Mempool.API.syncWithLedger'. implSyncWithLedger :: - ( - IOLike m + ( IOLike m , LedgerSupportsMempool blk - , HasTxId (GenTx blk) , ValidateEnvelope blk + , HasTxId (GenTx blk) ) => MempoolEnv m blk -> m (MempoolSnapshot blk) -implSyncWithLedger menv = do - (mTrace, mp) <- atomically $ do - is <- readTVar istate - ls <- getCurrentLedgerState ldgrInterface - let NewSyncedState is' msp mTrace = pureSyncWithLedger is ls cfg co - writeTVar istate is' - return (mTrace, msp) - whenJust mTrace (traceWith trcr) - return mp +implSyncWithLedger mpEnv = do + traceWith trcr TraceMempoolAttemptingSync + res <- withTMVarAnd istate (const $ getCurrentLedgerState ldgrInterface) $ + \is ls -> do + let (slot, ls') = tickLedgerState cfg $ ForgeInUnknownSlot ls + if pointHash (isTip is) == castHash (getTipHash ls) && isSlotNo is == slot + then do + -- The tip didn't change, put the same state. + traceWith trcr $ TraceMempoolSyncNotNeeded (isTip is) (castPoint $ getTip ls) + pure (OK (snapshotFromIS is), is) + else do + -- We need to revalidate + let pt = castPoint (getTip ls) + txs = [ txForgetValidated . TxSeq.txTicketTx $ tx + | tx <- TxSeq.toList $ isTxs is + ] + mTbs <- getLedgerTablesAtFor ldgrInterface pt txs + case mTbs of + Just tbs -> do + let (is', mTrace) = pureSyncWithLedger + capacityOverride + cfg + slot + ls' + tbs + is + whenJust mTrace (traceWith trcr) + traceWith trcr TraceMempoolSyncDone + pure (OK (snapshotFromIS is'), is') + Nothing -> do + -- If the point is gone, resync + pure (Resync, is) + case res of + OK v -> pure v + Resync -> implSyncWithLedger mpEnv + Retry _ -> error "Impossible!" 
where - MempoolEnv { mpEnvStateVar = istate - , mpEnvLedger = ldgrInterface - , mpEnvTracer = trcr - , mpEnvLedgerCfg = cfg - , mpEnvCapacityOverride = co - } = menv + MempoolEnv { mpEnvStateVar = istate + , mpEnvLedger = ldgrInterface + , mpEnvTracer = trcr + , mpEnvLedgerCfg = cfg + , mpEnvCapacityOverride = capacityOverride + } = mpEnv -- | Create a 'SyncWithLedger' value representing the values that will need to -- be stored for committing this synchronization with the Ledger. -- -- See the documentation of 'runSyncWithLedger' for more context. -pureSyncWithLedger :: - (LedgerSupportsMempool blk, HasTxId (GenTx blk), ValidateEnvelope blk) - => InternalState blk - -> LedgerState blk +pureSyncWithLedger + :: (LedgerSupportsMempool blk, HasTxId (GenTx blk)) + => MempoolCapacityBytesOverride -> LedgerConfig blk - -> MempoolCapacityBytesOverride - -> SyncWithLedger blk -pureSyncWithLedger istate lstate lcfg capacityOverride = - let vr = validateStateFor - capacityOverride - lcfg - (ForgeInUnknownSlot lstate) - istate - removed = vrInvalid vr - istate' = internalStateFromVR vr - mTrace = if null removed - then - Nothing - else - Just $ TraceMempoolRemoveTxs removed (isMempoolSize istate') - snapshot = snapshotFromIS istate' - in - NewSyncedState istate' snapshot mTrace + -> SlotNo + -> TickedLedgerState blk DiffMK + -> LedgerTables (LedgerState blk) ValuesMK + -> InternalState blk + -> ( InternalState blk + , Maybe (TraceEventMempool blk) + ) +pureSyncWithLedger capacityOverride lcfg slot lstate values istate = + let RevalidateTxsResult is' removed = + revalidateTxsFor + capacityOverride + lcfg + slot + lstate + values + (isLastTicketNo istate) + (TxSeq.toList $ isTxs istate) + mTrace = if null removed + then + Nothing + else + Just $ TraceMempoolRemoveTxs (map (\x -> (getInvalidated x, getReason x)) removed) (isMempoolSize is') + in (is', mTrace) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/BlockFetch/ClientInterface.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/BlockFetch/ClientInterface.hs index b0d5f1cbb1..d35f046bc0 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/BlockFetch/ClientInterface.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/BlockFetch/ClientInterface.hs @@ -74,7 +74,6 @@ initSlotForgeTimeOracle :: , BlockSupportsProtocol blk , History.HasHardForkHistory blk , SupportsNode.ConfigSupportsNode blk - , IsLedger (LedgerState blk) ) => TopLevelConfig blk -> ChainDB m blk @@ -123,7 +122,7 @@ initSlotForgeTimeOracle cfg chainDB = do pure slotForgeTime where toSummary :: - ExtLedgerState blk + ExtLedgerState blk EmptyMK -> History.Summary (History.HardForkIndices blk) toSummary = History.hardForkSummary (configLedger cfg) . 
ledgerState diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs index ae0edd3420..045ea279ef 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client.hs @@ -100,7 +100,7 @@ import Ouroboros.Consensus.HeaderStateHistory validateHeader) import qualified Ouroboros.Consensus.HeaderStateHistory as HeaderStateHistory import Ouroboros.Consensus.HeaderValidation hiding (validateHeader) -import Ouroboros.Consensus.Ledger.Basics (LedgerState) +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.HistoricityCheck @@ -151,7 +151,7 @@ data ChainDbView m blk = ChainDbView { , getHeaderStateHistory :: STM m (HeaderStateHistory blk) , - getPastLedger :: Point blk -> STM m (Maybe (ExtLedgerState blk)) + getPastLedger :: Point blk -> STM m (Maybe (ExtLedgerState blk EmptyMK)) , getIsInvalidBlock :: STM m @@ -208,8 +208,7 @@ newtype CSJEnabledConfig = CSJEnabledConfig { } defaultChainDbView :: - (IOLike m, LedgerSupportsProtocol blk) - => ChainDB m blk -> ChainDbView m blk + ChainDB m blk -> ChainDbView m blk defaultChainDbView chainDB = ChainDbView { getCurrentChain = ChainDB.getCurrentChain chainDB , getHeaderStateHistory = ChainDB.getHeaderStateHistory chainDB @@ -1693,7 +1692,7 @@ checkTime cfgEnv dynEnv intEnv = checkArrivalTime :: KnownIntersectionState blk -> arrival - -> WithEarlyExit m (Intersects blk (LedgerState blk, RelativeTime)) + -> WithEarlyExit m (Intersects blk (LedgerState blk EmptyMK, RelativeTime)) checkArrivalTime kis arrival = do Intersects kis' (lst, judgment) <- do readLedgerState kis $ \lst -> @@ -1716,14 +1715,14 @@ checkTime cfgEnv dynEnv intEnv = readLedgerState :: forall a. KnownIntersectionState blk - -> (LedgerState blk -> Maybe a) + -> (LedgerState blk EmptyMK -> Maybe a) -> WithEarlyExit m (Intersects blk a) readLedgerState kis prj = castM $ readLedgerStateHelper kis prj readLedgerStateHelper :: forall a. KnownIntersectionState blk - -> (LedgerState blk -> Maybe a) + -> (LedgerState blk EmptyMK -> Maybe a) -> m (WithEarlyExit m (Intersects blk a)) readLedgerStateHelper kis prj = atomically $ do -- We must first find the most recent intersection with the current @@ -1754,7 +1753,7 @@ checkTime cfgEnv dynEnv intEnv = -- that far into the future. 
projectLedgerView :: SlotNo - -> LedgerState blk + -> LedgerState blk EmptyMK -> Maybe (LedgerView (BlockProtocol blk)) projectLedgerView slot lst = let forecast = ledgerViewForecastAt (configLedger cfg) lst diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/InFutureCheck.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/InFutureCheck.hs index 052aede894..c2db504a41 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/InFutureCheck.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/ChainSync/Client/InFutureCheck.hs @@ -38,7 +38,8 @@ import Ouroboros.Consensus.HardFork.Abstract (HasHardForkHistory, import Ouroboros.Consensus.HardFork.History (PastHorizonException) import Ouroboros.Consensus.HardFork.History.Qry (runQuery, slotToWallclock) -import Ouroboros.Consensus.Ledger.Basics (LedgerConfig, LedgerState) +import Ouroboros.Consensus.Ledger.Basics (EmptyMK, LedgerConfig, + LedgerState) import Ouroboros.Consensus.Util.Time (nominalDelay, secondsToNominalDiffTime) import Ouroboros.Network.Block (HasHeader) @@ -71,7 +72,7 @@ data HeaderInFutureCheck m blk arrival judgment = HeaderInFutureCheck { -- returns 'Ouroboros.Consensus.HardFork.HistoryPastHorizon'. judgeHeaderArrival :: LedgerConfig blk - -> LedgerState blk + -> LedgerState blk EmptyMK -> arrival -> Except PastHorizonException judgment , diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/LocalStateQuery/Server.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/LocalStateQuery/Server.hs index cd85b08761..34f5a49ccb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/LocalStateQuery/Server.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/MiniProtocol/LocalStateQuery/Server.hs @@ -1,28 +1,36 @@ +{-# LANGUAGE LambdaCase #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TupleSections #-} module Ouroboros.Consensus.MiniProtocol.LocalStateQuery.Server (localStateQueryServer) where + +import Data.Functor ((<&>)) import Ouroboros.Consensus.Block -import Ouroboros.Consensus.HeaderValidation (HasAnnTip (..)) import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Ledger.Query +import Ouroboros.Consensus.Ledger.Query (BlockSupportsLedgerQuery, + Query) +import qualified Ouroboros.Consensus.Ledger.Query as Query +import Ouroboros.Consensus.Ledger.SupportsProtocol + (LedgerSupportsProtocol) +import Ouroboros.Consensus.Storage.LedgerDB import Ouroboros.Consensus.Util.IOLike import Ouroboros.Network.Protocol.LocalStateQuery.Server import Ouroboros.Network.Protocol.LocalStateQuery.Type (AcquireFailure (..), Target (..)) localStateQueryServer :: - forall m blk. (IOLike m, BlockSupportsLedgerQuery blk, ConfigSupportsNode blk, HasAnnTip blk) + forall m blk. 
+ ( IOLike m + , BlockSupportsLedgerQuery blk + , Query.ConfigSupportsNode blk + , LedgerSupportsProtocol blk + ) => ExtLedgerCfg blk - -> STM m (Point blk) - -- ^ Get tip point - -> (Point blk -> STM m (Maybe (ExtLedgerState blk))) - -- ^ Get a past ledger - -> STM m (Point blk) - -- ^ Get the immutable point + -> ( Target (Point blk) + -> m (Either GetForkerError (ReadOnlyForker' m blk)) + ) -> LocalStateQueryServer blk (Point blk) (Query blk) m () -localStateQueryServer cfg getTipPoint getPastLedger getImmutablePoint = +localStateQueryServer cfg getView = LocalStateQueryServer $ return idle where idle :: ServerStIdle blk (Point blk) (Query blk) m () @@ -33,36 +41,29 @@ localStateQueryServer cfg getTipPoint getPastLedger getImmutablePoint = handleAcquire :: Target (Point blk) -> m (ServerStAcquiring blk (Point blk) (Query blk) m ()) - handleAcquire tpt = do - (pt, mPastLedger, immutablePoint) <- atomically $ do - pt <- case tpt of - VolatileTip -> getTipPoint - SpecificPoint point -> pure point - ImmutableTip -> getImmutablePoint - (pt,,) <$> getPastLedger pt <*> getImmutablePoint - - return $ case mPastLedger of - Just pastLedger - -> SendMsgAcquired $ acquired pastLedger - Nothing - | pointSlot pt < pointSlot immutablePoint - -> SendMsgFailure AcquireFailurePointTooOld idle - | otherwise - -> SendMsgFailure AcquireFailurePointNotOnChain idle + handleAcquire mpt = do + getView mpt <&> \case + Right forker -> SendMsgAcquired $ acquired forker + Left e -> case e of + PointTooOld -> + SendMsgFailure AcquireFailurePointTooOld idle + PointNotOnChain -> + SendMsgFailure AcquireFailurePointNotOnChain idle - acquired :: ExtLedgerState blk + acquired :: ReadOnlyForker' m blk -> ServerStAcquired blk (Point blk) (Query blk) m () - acquired st = ServerStAcquired { - recvMsgQuery = handleQuery st - , recvMsgReAcquire = handleAcquire - , recvMsgRelease = return idle + acquired forker = ServerStAcquired { + recvMsgQuery = handleQuery forker + , recvMsgReAcquire = \mp -> do close; handleAcquire mp + , recvMsgRelease = do close; return idle } + where + close = roforkerClose forker handleQuery :: - ExtLedgerState blk + ReadOnlyForker' m blk -> Query blk result -> m (ServerStQuerying blk (Point blk) (Query blk) m () result) - handleQuery st query = return $ - SendMsgResult - (answerQuery cfg query st) - (acquired st) + handleQuery forker query = do + result <- Query.answerQuery cfg forker query + return $ SendMsgResult result (acquired forker) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/ProtocolInfo.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/ProtocolInfo.hs index 80741018c5..c3ca013806 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/ProtocolInfo.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/ProtocolInfo.hs @@ -13,6 +13,7 @@ import Data.Word import NoThunks.Class (NoThunks) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Basics (ValuesMK) import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.NodeId @@ -35,7 +36,7 @@ enumCoreNodes (NumCoreNodes numNodes) = -- | Data required to run the specified protocol. data ProtocolInfo b = ProtocolInfo { pInfoConfig :: TopLevelConfig b - , pInfoInitLedger :: ExtLedgerState b -- ^ At genesis + , pInfoInitLedger :: ExtLedgerState b ValuesMK -- ^ At genesis } -- | Data required by clients of a node running the specified protocol. 
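-- A minimal sketch of how pieces introduced in this patch could be wired
-- together: the rewritten 'localStateQueryServer' above takes an action that
-- acquires a read-only forker, and the 'getReadOnlyForkerAtPoint' field added
-- to 'ChainDB' (see the ChainDB.API changes further below) provides exactly
-- such an action. The helper name 'mkLocalStateQueryServer' and the exact
-- constraint set are illustrative assumptions, not part of this patch:
--
-- > mkLocalStateQueryServer ::
-- >      ( IOLike m
-- >      , BlockSupportsLedgerQuery blk
-- >      , ConfigSupportsNode blk
-- >      , LedgerSupportsProtocol blk
-- >      )
-- >   => ExtLedgerCfg blk
-- >   -> ResourceRegistry m
-- >   -> ChainDB m blk
-- >   -> LocalStateQueryServer blk (Point blk) (Query blk) m ()
-- > mkLocalStateQueryServer cfg registry chainDB =
-- >     localStateQueryServer cfg (getReadOnlyForkerAtPoint chainDB registry)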
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Run.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Run.hs index 1fe2ae42ee..85000efe60 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Run.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Run.hs @@ -1,4 +1,6 @@ {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} -- | Infrastructure required to run a node -- @@ -6,7 +8,7 @@ module Ouroboros.Consensus.Node.Run ( -- * SerialiseDisk ImmutableDbSerialiseConstraints - , LgrDbSerialiseConstraints + , LedgerDbSerialiseConstraints , SerialiseDiskConstraints , VolatileDbSerialiseConstraints -- * SerialiseNodeToNode @@ -32,7 +34,7 @@ import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Serialisation import Ouroboros.Consensus.Storage.ChainDB (ImmutableDbSerialiseConstraints, - LgrDbSerialiseConstraints, SerialiseDiskConstraints, + LedgerDbSerialiseConstraints, SerialiseDiskConstraints, VolatileDbSerialiseConstraints) import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.Util (ShowProxy) @@ -71,38 +73,39 @@ class ( Typeable blk , SerialiseNodeToClient blk (GenTxId blk) , SerialiseNodeToClient blk SlotNo , SerialiseNodeToClient blk (ApplyTxErr blk) - , SerialiseNodeToClient blk (SomeSecond BlockQuery blk) - , SerialiseResult blk (BlockQuery blk) + , SerialiseNodeToClient blk (SomeBlockQuery (BlockQuery blk)) + , SerialiseResult' blk BlockQuery ) => SerialiseNodeToClientConstraints blk -class ( LedgerSupportsProtocol blk - , InspectLedger blk - , HasHardForkHistory blk - , LedgerSupportsMempool blk - , HasTxId (GenTx blk) - , BlockSupportsLedgerQuery blk - , SupportedNetworkProtocolVersion blk - , ConfigSupportsNode blk - , ConvertRawHash blk - , CommonProtocolParams blk - , HasBinaryBlockInfo blk - , SerialiseDiskConstraints blk - , SerialiseNodeToNodeConstraints blk - , SerialiseNodeToClientConstraints blk - , LedgerSupportsPeerSelection blk - , NodeInitStorage blk - , BlockSupportsMetrics blk - , BlockSupportsDiffusionPipelining blk - , BlockSupportsSanityCheck blk - , Show (CannotForge blk) - , Show (ForgeStateInfo blk) - , Show (ForgeStateUpdateError blk) - , ShowProxy blk - , ShowProxy (ApplyTxErr blk) - , ShowProxy (GenTx blk) - , ShowProxy (Header blk) - , ShowProxy (BlockQuery blk) - , ShowProxy (TxId (GenTx blk)) +class ( LedgerSupportsProtocol blk + , InspectLedger blk + , HasHardForkHistory blk + , LedgerSupportsMempool blk + , HasTxId (GenTx blk) + , BlockSupportsLedgerQuery blk + , SupportedNetworkProtocolVersion blk + , ConfigSupportsNode blk + , ConvertRawHash blk + , CommonProtocolParams blk + , HasBinaryBlockInfo blk + , SerialiseDiskConstraints blk + , SerialiseNodeToNodeConstraints blk + , SerialiseNodeToClientConstraints blk + , LedgerSupportsPeerSelection blk + , NodeInitStorage blk + , BlockSupportsMetrics blk + , BlockSupportsDiffusionPipelining blk + , BlockSupportsSanityCheck blk + , Show (CannotForge blk) + , Show (ForgeStateInfo blk) + , Show (ForgeStateUpdateError blk) + , ShowProxy blk + , ShowProxy (ApplyTxErr blk) + , ShowProxy (GenTx blk) + , ShowProxy (Header blk) + , ShowProxy (BlockQuery blk) + , ShowProxy (TxId (GenTx blk)) + , (forall fp. ShowQuery (BlockQuery blk fp)) ) => RunNode blk -- This class is intentionally empty. 
It is not necessarily compositional - ie -- the instance for 'HardForkBlock' might do more than merely delegate to the diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Serialisation.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Serialisation.hs index 71f10eaafb..e89c7ad6fb 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Serialisation.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Node/Serialisation.hs @@ -2,7 +2,9 @@ {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE UndecidableInstances #-} -- | Serialisation for sending things across the network. @@ -18,6 +20,7 @@ module Ouroboros.Consensus.Node.Serialisation ( SerialiseNodeToClient (..) , SerialiseNodeToNode (..) , SerialiseResult (..) + , SerialiseResult' (..) -- * Defaults , defaultDecodeCBORinCBOR , defaultEncodeCBORinCBOR @@ -28,14 +31,15 @@ module Ouroboros.Consensus.Node.Serialisation ( import Codec.CBOR.Decoding (Decoder) import Codec.CBOR.Encoding (Encoding) import Codec.Serialise (Serialise (decode, encode)) +import Data.Kind import Data.SOP.BasicFunctors import Ouroboros.Consensus.Block import Ouroboros.Consensus.Ledger.SupportsMempool (ApplyTxErr, GenTxId) import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.TypeFamilyWrappers +import Ouroboros.Consensus.Util (Some (..)) import Ouroboros.Network.Block (unwrapCBORinCBOR, wrapCBORinCBOR) -import Ouroboros.Network.Protocol.LocalStateQuery.Codec (Some (..)) {------------------------------------------------------------------------------- NodeToNode @@ -91,18 +95,34 @@ class SerialiseNodeToClient blk a where -- -- The @LocalStateQuery@ protocol is a node-to-client protocol, hence the -- 'NodeToClientVersion' argument. +type SerialiseResult :: Type -> (Type -> Type -> Type) -> Constraint class SerialiseResult blk query where encodeResult :: forall result. CodecConfig blk -> BlockNodeToClientVersion blk - -> query result + -> query blk result -> result -> Encoding decodeResult :: forall result. CodecConfig blk -> BlockNodeToClientVersion blk - -> query result + -> query blk result + -> forall s. Decoder s result + +type SerialiseResult' :: Type -> (Type -> k -> Type -> Type) -> Constraint +class SerialiseResult' blk query where + encodeResult' + :: forall fp result. + CodecConfig blk + -> BlockNodeToClientVersion blk + -> query blk fp result + -> result -> Encoding + decodeResult' + :: forall fp result. + CodecConfig blk + -> BlockNodeToClientVersion blk + -> query blk fp result -> forall s. Decoder s result {------------------------------------------------------------------------------- diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB.hs index 157350a948..3d9cc18932 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB.hs @@ -1,28 +1,32 @@ --- | The storage layer is a highly specialized database for storing the blockchain. --- It consists of five subcomponents: +-- | The storage layer is a highly specialized database for storing the +-- blockchain. 
It consists of five subcomponents: -- -- * An abstract file system API, 'System.FS.API.HasFS', -- that smooths out over some differences between the file systems of -- different operating systems and, more importantly, allows us to simulate -- all kinds of failures. This is then used for stress-testing the other -- components below. +-- -- * The __[Immutable DB]("Ouroboros.Consensus.Storage.ImmutableDB")__, stores -- the part of the chain that is immutable, that is, no longer subject to -- rollback. It is an append-only database, providing efficient access to the --- chain. 'Ouroboros.Consensus.Storage.ImmutableDB.API.ImmutableDB' defines the --- immutable DB API. +-- chain. 'Ouroboros.Consensus.Storage.ImmutableDB.API.ImmutableDB' defines +-- the immutable DB API. +-- -- * The __[Volatile DB]("Ouroboros.Consensus.Storage.VolatileDB")__, stores the -- part of the chain near its tip. This doesn't really store a __chain__ as -- such, but rather simply a collection of blocks from which we might --- __construct__ a chain. 'Ouroboros.Consensus.Storage.VolatileDB.API.VolatileDB' --- defines the volatile DB API. --- * The ledger DB, stores the state of the ledger. The --- __[on disk]("Ouroboros.Consensus.Storage.LedgerDB.OnDisk")__ part only stores --- snapshots of the ledger state that correspond to immutable blocks. The --- __[in memory]("Ouroboros.Consensus.Storage.LedgerDB.InMemory")__ part --- stores various snapshots of the ledger state corresponding to blocks near --- the current tip of the chain, and provides an efficient way of computing --- any ledger state for the last @k@ blocks of the chain. +-- __construct__ a chain. +-- 'Ouroboros.Consensus.Storage.VolatileDB.API.VolatileDB' defines the +-- volatile DB API. +-- +-- * The __[Ledger DB]("Ouroboros.Consensus.Storage.LedgerDB")__, stores the +-- \(k\) last ledger states corresponding to the blocks in the volatile DB, as +-- well as the sequence of differences used to construct +-- 'Ouroboros.Consensus.Ledger.Tables.Basics.LedgerTables' at any of those +-- ledger states. 'Ouroboros.Consensus.Storage.LedgerDB.LedgerDB' defines the +-- ledger DB API. +-- -- * The Chain DB finally combines all of these components. It makes decisions -- about which chains to adopt (chain selection), switches to forks when -- needed, deals with clock skew, and provides various interfaces to the rest @@ -34,6 +38,12 @@ -- chain. 'Ouroboros.Consensus.Storage.ChainDB.API.ChainDB' defines the chain -- DB API. -- +-- NOTE: at the moment there is an inconsistency in the module structure for +-- each of these components. In particular, +-- "Ouroboros.Consensus.Storage.LedgerDB" contains the whole definition and API +-- for the LedgerDB, but the other three databases are broken up into multiple +-- smaller submodules. We aim to resolve this when UTxO-HD is merged. +-- module Ouroboros.Consensus.Storage.ChainDB ( module Ouroboros.Consensus.Storage.ChainDB.API , module Ouroboros.Consensus.Storage.ChainDB.Impl diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs index d905d6b240..d2632da2f9 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/API.hs @@ -15,10 +15,7 @@ module Ouroboros.Consensus.Storage.ChainDB.API ( -- * Main ChainDB API ChainDB (..) 
- , getCurrentLedger , getCurrentTip - , getImmutableLedger - , getPastLedger , getTipBlockNo -- * Adding a block , AddBlockPromise (..) @@ -70,19 +67,14 @@ import Control.ResourceRegistry import Data.Typeable (Typeable) import GHC.Generics (Generic) import Ouroboros.Consensus.Block -import Ouroboros.Consensus.HeaderStateHistory - (HeaderStateHistory (..)) +import Ouroboros.Consensus.HeaderStateHistory (HeaderStateHistory) import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment - (InvalidBlockPunishment) -import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as InvalidBlockPunishment import Ouroboros.Consensus.Storage.Common -import Ouroboros.Consensus.Storage.LedgerDB (LedgerDB') -import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB (GetForkerError, + ReadOnlyForker', Statistics) import Ouroboros.Consensus.Storage.Serialisation -import Ouroboros.Consensus.Util ((..:)) import Ouroboros.Consensus.Util.CallStack import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.STM (WithFingerprint) @@ -93,6 +85,7 @@ import Ouroboros.Network.Block (ChainUpdate, MaxSlotNo, import qualified Ouroboros.Network.Block as Network import Ouroboros.Network.Mock.Chain (Chain (..)) import qualified Ouroboros.Network.Mock.Chain as Chain +import Ouroboros.Network.Protocol.LocalStateQuery.Type import System.FS.API.Types (FsError) -- | The chain database @@ -177,13 +170,36 @@ data ChainDB m blk = ChainDB { -- fragment will move as the chain grows. , getCurrentChain :: STM m (AnchoredFragment (Header blk)) - -- | Return the LedgerDB containing the last @k@ ledger states. - , getLedgerDB :: STM m (LedgerDB' blk) - -- | Get a 'HeaderStateHistory' populated with the 'HeaderState's and slot - -- times of the last @k@ blocks of the current chain. + -- | Get current ledger + , getCurrentLedger :: STM m (ExtLedgerState blk EmptyMK) + + -- | Get the immutable ledger, i.e., typically @k@ blocks back. + , getImmutableLedger :: STM m (ExtLedgerState blk EmptyMK) + + -- | Get the ledger for the given point. + -- + -- When the given point is not among the last @k@ blocks of the current + -- chain (i.e., older than @k@ or not on the current chain), 'Nothing' is + -- returned. + , getPastLedger :: Point blk -> STM m (Maybe (ExtLedgerState blk EmptyMK)) + + -- | Get a 'HeaderStateHistory' populated with the 'HeaderState's of the + -- last @k@ blocks of the current chain. , getHeaderStateHistory :: STM m (HeaderStateHistory blk) + -- | Acquire a read-only forker at a specific point if that point exists + -- in the db. + -- + -- Note that the forker should be closed by the caller of this function. + -- + -- The forker is read-only because a read-write forker could be used to + -- change the internal state of the LedgerDB. + , getReadOnlyForkerAtPoint :: + ResourceRegistry m + -> Target (Point blk) + -> m (Either GetForkerError (ReadOnlyForker' m blk)) + -- | Get block at the tip of the chain, if one exists -- -- Returns 'Nothing' if the database is empty. , getTipBlock :: m (Maybe blk) @@ -347,6 +363,24 @@ data ChainDB m blk = ChainDB { -- invalid block is detected. These blocks are likely to be valid. , getIsInvalidBlock :: STM m (WithFingerprint (HeaderHash blk -> Maybe (ExtValidationError blk))) + -- | Read ledger tables at a given point on the chain, if it exists.
+ -- + -- This is intended to be used by the mempool to hydrate a ledger state at + -- a specific point. + , getLedgerTablesAtFor :: + Point blk + -> LedgerTables (ExtLedgerState blk) KeysMK + -> m (Maybe (LedgerTables (ExtLedgerState blk) ValuesMK)) + + -- | Get statistics from the LedgerDB, in particular the number of entries + -- in the tables. + , getStatistics :: m (Maybe Statistics) + + -- | Close the ChainDB + -- + -- Idempotent. + -- + -- Should only be called on shutdown. , closeDB :: m () -- | Return 'True' when the database is open. @@ -363,28 +397,6 @@ getTipBlockNo :: (Monad (STM m), HasHeader (Header blk)) => ChainDB m blk -> STM m (WithOrigin BlockNo) getTipBlockNo = fmap Network.getTipBlockNo . getCurrentTip --- | Get current ledger -getCurrentLedger :: - (Monad (STM m), IsLedger (LedgerState blk)) - => ChainDB m blk -> STM m (ExtLedgerState blk) -getCurrentLedger = fmap LedgerDB.ledgerDbCurrent . getLedgerDB - --- | Get the immutable ledger, i.e., typically @k@ blocks back. -getImmutableLedger :: - Monad (STM m) - => ChainDB m blk -> STM m (ExtLedgerState blk) -getImmutableLedger = fmap LedgerDB.ledgerDbAnchor . getLedgerDB - --- | Get the ledger for the given point. --- --- When the given point is not among the last @k@ blocks of the current --- chain (i.e., older than @k@ or not on the current chain), 'Nothing' is --- returned. -getPastLedger :: - (Monad (STM m), LedgerSupportsProtocol blk) - => ChainDB m blk -> Point blk -> STM m (Maybe (ExtLedgerState blk)) -getPastLedger db pt = LedgerDB.ledgerDbPast pt <$> getLedgerDB db - {------------------------------------------------------------------------------- Adding a block -------------------------------------------------------------------------------} @@ -545,7 +557,7 @@ fromChain :: -> m (ChainDB m blk) fromChain openDB chain = do chainDB <- openDB - mapM_ (addBlock_ chainDB InvalidBlockPunishment.noPunishment) $ Chain.toOldestFirst chain + mapM_ (addBlock_ chainDB noPunishment) $ Chain.toOldestFirst chain return chainDB {------------------------------------------------------------------------------- diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs index 87dca9f1f4..f9527b7e11 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl.hs @@ -13,7 +13,6 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl ( , openDB , withDB -- * Trace types - , LgrDB.TraceReplayEvent , SelectionChangedInfo (..) , TraceAddBlockEvent (..) , TraceCopyToImmutableDBEvent (..) @@ -28,20 +27,22 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl ( -- * Re-exported for convenience , Args.RelativeMountPoint (..) , ImmutableDB.ImmutableDbSerialiseConstraints - , LgrDB.LgrDbSerialiseConstraints + , LedgerDB.LedgerDbSerialiseConstraints , VolatileDB.VolatileDbSerialiseConstraints -- * Internals for testing purposes , Internal (..) 
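The 'getLedgerTablesAtFor' field added above is how a client such as the mempool is expected to hydrate the ledger tables it needs at a specific point. A rough usage sketch, not part of this patch: the helper name 'hydrateLedgerTablesAt' is hypothetical, the import locations are assumed, and a real caller would re-anchor at a newer point instead of merely reporting the stale one.

import Ouroboros.Consensus.Block (Point)
import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState)
import Ouroboros.Consensus.Ledger.Tables (KeysMK, LedgerTables, ValuesMK)
import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB (..))

-- Ask the ChainDB for the values backing the given keys at the given point.
-- 'Nothing' from 'getLedgerTablesAtFor' means the point is no longer in the
-- LedgerDB (rolled back, or older than the immutable tip), which we surface
-- to the caller as 'Left' so it can pick a more recent point and retry.
hydrateLedgerTablesAt ::
     Monad m
  => ChainDB m blk
  -> Point blk
  -> LedgerTables (ExtLedgerState blk) KeysMK
  -> m (Either (Point blk) (LedgerTables (ExtLedgerState blk) ValuesMK))
hydrateLedgerTablesAt chainDB pt keys = do
    mValues <- getLedgerTablesAtFor chainDB pt keys
    pure $ maybe (Left pt) Right mValues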
, openDBInternal ) where -import Control.Monad (when) +import Control.Monad (void, when) +import Control.Monad.Base (MonadBase) import Control.Monad.Trans.Class (lift) import Control.ResourceRegistry (WithTempRegistry, allocate, - runInnerWithTempRegistry, runWithTempRegistry) + runInnerWithTempRegistry, runWithTempRegistry, + withRegistry) import Control.Tracer -import Data.Functor (void, (<&>)) -import Data.Functor.Identity (Identity) +import Data.Functor ((<&>)) +import Data.Functor.Contravariant ((>$<)) import qualified Data.Map.Strict as Map import Data.Maybe.Strict (StrictMaybe (..)) import GHC.Stack (HasCallStack) @@ -59,12 +60,14 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Background as Backgrou import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.ChainSel as ChainSel import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Follower as Follower import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Iterator as Iterator -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Query as Query import Ouroboros.Consensus.Storage.ChainDB.Impl.Types import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB +import qualified Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream as ImmutableDB +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util (newFuse, whenJust, withFuse) +import Ouroboros.Consensus.Util.Args import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.STM (Fingerprint (..), WithFingerprint (..)) @@ -83,8 +86,9 @@ withDB :: , HasHardForkHistory blk , ConvertRawHash blk , SerialiseDiskConstraints blk + , MonadBase m m ) - => ChainDbArgs Identity m blk + => Complete Args.ChainDbArgs m blk -> (ChainDB m blk -> m a) -> m a withDB args = bracket (fst <$> openDBInternal args True) API.closeDB @@ -98,8 +102,9 @@ openDB :: , HasHardForkHistory blk , ConvertRawHash blk , SerialiseDiskConstraints blk + , MonadBase m m ) - => ChainDbArgs Identity m blk + => Complete Args.ChainDbArgs m blk -> m (ChainDB m blk) openDB args = fst <$> openDBInternal args True @@ -112,8 +117,10 @@ openDBInternal :: , HasHardForkHistory blk , ConvertRawHash blk , SerialiseDiskConstraints blk + , HasCallStack + , MonadBase m m ) - => ChainDbArgs Identity m blk + => Complete Args.ChainDbArgs m blk -> Bool -- ^ 'True' = Launch background tasks -> m (ChainDB m blk, Internal m blk) openDBInternal args launchBgTasks = runWithTempRegistry $ do @@ -132,37 +139,40 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do maxSlot <- lift $ atomically $ VolatileDB.getMaxSlotNo volatileDB (chainDB, testing, env) <- lift $ do traceWith tracer $ TraceOpenEvent (OpenedVolatileDB maxSlot) - let lgrReplayTracer = - LgrDB.decorateReplayTracerWithGoal - immutableDbTipPoint - (contramap TraceLedgerReplayEvent tracer) traceWith tracer $ TraceOpenEvent StartedOpeningLgrDB - (lgrDB, replayed) <- LgrDB.openDB argsLgrDb - lgrReplayTracer - immutableDB + (lgrDB, replayed) <- LedgerDB.openDB + argsLgrDb + (ImmutableDB.streamAPI immutableDB) + immutableDbTipPoint (Query.getAnyKnownBlock immutableDB volatileDB) traceWith tracer $ TraceOpenEvent OpenedLgrDB varInvalid <- newTVarIO (WithFingerprint Map.empty (Fingerprint 0)) - let initChainSelTracer = contramap TraceInitChainSelEvent tracer + let initChainSelTracer = TraceInitChainSelEvent >$< tracer traceWith initChainSelTracer 
StartedInitChainSelection initialLoE <- Args.cdbsLoE cdbSpecificArgs - chainAndLedger <- ChainSel.initialChainSelection + chain <- withRegistry $ \rr -> do + chainAndLedger <- ChainSel.initialChainSelection immutableDB volatileDB lgrDB + rr initChainSelTracer (Args.cdbsTopLevelConfig cdbSpecificArgs) varInvalid (void initialLoE) - traceWith initChainSelTracer InitialChainSelected + traceWith initChainSelTracer InitialChainSelected + + let chain = VF.validatedFragment chainAndLedger + ledger = VF.validatedLedger chainAndLedger - let chain = VF.validatedFragment chainAndLedger - ledger = VF.validatedLedger chainAndLedger + atomically $ LedgerDB.forkerCommit ledger + LedgerDB.forkerClose ledger + pure chain + LedgerDB.tryFlush lgrDB - atomically $ LgrDB.setCurrent lgrDB ledger varChain <- newTVarIO chain varTentativeState <- newTVarIO $ initialTentativeHeaderState (Proxy @blk) varTentativeHeader <- newTVarIO SNothing @@ -177,7 +187,7 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do let env = CDB { cdbImmutableDB = immutableDB , cdbVolatileDB = volatileDB - , cdbLgrDB = lgrDB + , cdbLedgerDB = lgrDB , cdbChain = varChain , cdbTentativeState = varTentativeState , cdbTentativeHeader = varTentativeHeader @@ -199,32 +209,38 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do } h <- fmap CDBHandle $ newTVarIO $ ChainDbOpen env let chainDB = API.ChainDB - { addBlockAsync = getEnv2 h ChainSel.addBlockAsync + { addBlockAsync = getEnv2 h ChainSel.addBlockAsync , chainSelAsync = getEnv h ChainSel.triggerChainSelectionAsync - , getCurrentChain = getEnvSTM h Query.getCurrentChain - , getLedgerDB = getEnvSTM h Query.getLedgerDB - , getHeaderStateHistory = getEnvSTM h Query.getHeaderStateHistory - , getTipBlock = getEnv h Query.getTipBlock - , getTipHeader = getEnv h Query.getTipHeader - , getTipPoint = getEnvSTM h Query.getTipPoint - , getBlockComponent = getEnv2 h Query.getBlockComponent - , getIsFetched = getEnvSTM h Query.getIsFetched - , getIsValid = getEnvSTM h Query.getIsValid - , getMaxSlotNo = getEnvSTM h Query.getMaxSlotNo - , stream = Iterator.stream h - , newFollower = Follower.newFollower h - , getIsInvalidBlock = getEnvSTM h Query.getIsInvalidBlock - , closeDB = closeDB h - , isOpen = isOpen h + , getCurrentChain = getEnvSTM h Query.getCurrentChain + , getTipBlock = getEnv h Query.getTipBlock + , getTipHeader = getEnv h Query.getTipHeader + , getTipPoint = getEnvSTM h Query.getTipPoint + , getBlockComponent = getEnv2 h Query.getBlockComponent + , getIsFetched = getEnvSTM h Query.getIsFetched + , getIsValid = getEnvSTM h Query.getIsValid + , getMaxSlotNo = getEnvSTM h Query.getMaxSlotNo + , stream = Iterator.stream h + , newFollower = Follower.newFollower h + , getIsInvalidBlock = getEnvSTM h Query.getIsInvalidBlock + , closeDB = closeDB h + , isOpen = isOpen h + , getCurrentLedger = getEnvSTM h Query.getCurrentLedger + , getImmutableLedger = getEnvSTM h Query.getImmutableLedger + , getPastLedger = getEnvSTM1 h Query.getPastLedger + , getHeaderStateHistory = getEnvSTM h Query.getHeaderStateHistory + , getReadOnlyForkerAtPoint = getEnv2 h Query.getReadOnlyForkerAtPoint + , getLedgerTablesAtFor = getEnv2 h Query.getLedgerTablesAtFor + , getStatistics = getEnv h Query.getStatistics } addBlockTestFuse <- newFuse "test chain selection" copyTestFuse <- newFuse "test copy to immutable db" let testing = Internal - { intCopyToImmutableDB = getEnv h (withFuse copyTestFuse . 
Background.copyToImmutableDB) - , intGarbageCollect = getEnv1 h Background.garbageCollect - , intUpdateLedgerSnapshots = getEnv h Background.updateLedgerSnapshots - , intAddBlockRunner = getEnv h (Background.addBlockRunner addBlockTestFuse) - , intKillBgThreads = varKillBgThreads + { intCopyToImmutableDB = getEnv h (withFuse copyTestFuse . Background.copyToImmutableDB) + , intGarbageCollect = getEnv1 h Background.garbageCollect + , intTryTakeSnapshot = getEnv h $ \env' -> + void $ LedgerDB.tryTakeSnapshot (cdbLedgerDB env') Nothing maxBound + , intAddBlockRunner = getEnv h (Background.addBlockRunner addBlockTestFuse) + , intKillBgThreads = varKillBgThreads } traceWith tracer $ TraceOpenEvent $ OpenedDB @@ -235,7 +251,7 @@ openDBInternal args launchBgTasks = runWithTempRegistry $ do return (chainDB, testing, env) - _ <- lift $ allocate (Args.cdbsRegistry cdbSpecificArgs) (\_ -> return $ chainDB) API.closeDB + _ <- lift $ allocate (Args.cdbsRegistry cdbSpecificArgs) (\_ -> return chainDB) API.closeDB return ((chainDB, testing), env) where @@ -271,6 +287,7 @@ closeDB :: ) => ChainDbHandle m blk -> m () closeDB (CDBHandle varState) = do + traceMarkerIO "Closing ChainDB" mbOpenEnv <- atomically $ readTVar varState >>= \case -- Idempotent ChainDbClosed -> return Nothing @@ -289,6 +306,7 @@ closeDB (CDBHandle varState) = do ImmutableDB.closeDB cdbImmutableDB VolatileDB.closeDB cdbVolatileDB + LedgerDB.closeDB cdbLedgerDB chain <- atomically $ readTVar cdbChain diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs index 9b88506e0b..beb757434e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Args.hs @@ -13,7 +13,7 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl.Args ( , completeChainDbArgs , defaultArgs , ensureValidateAll - , updateDiskPolicyArgs + , updateSnapshotPolicyArgs , updateTracer ) where @@ -25,15 +25,18 @@ import Data.Time.Clock (secondsToDiffTime) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB.API (GetLoEFragment, LoE (LoEDisabled)) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LedgerDB import Ouroboros.Consensus.Storage.ChainDB.Impl.Types (TraceEvent (..)) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB -import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB -import Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy +import qualified Ouroboros.Consensus.Storage.LedgerDB.API.Config as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args + (LedgerDbFlavorArgs) +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Args as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util.Args import Ouroboros.Consensus.Util.IOLike @@ -46,7 +49,7 @@ import System.FS.API data ChainDbArgs f m blk = ChainDbArgs { cdbImmDbArgs :: ImmutableDB.ImmutableDbArgs f m blk , cdbVolDbArgs :: VolatileDB.VolatileDbArgs f m blk - , cdbLgrDbArgs :: LedgerDB.LgrDbArgs f m blk + , cdbLgrDbArgs :: LedgerDB.LedgerDbArgs f m blk , cdbsArgs :: 
ChainDbSpecificArgs f m blk } @@ -148,7 +151,7 @@ completeChainDbArgs :: forall m blk. (ConsensusProtocol (BlockProtocol blk), IOLike m) => ResourceRegistry m -> TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk ValuesMK -- ^ Initial ledger -> ImmutableDB.ChunkInfo -> (blk -> Bool) @@ -157,6 +160,7 @@ completeChainDbArgs :: -- ^ Immutable FS, see 'NodeDatabasePaths' -> (RelativeMountPoint -> SomeHasFS m) -- ^ Volatile FS, see 'NodeDatabasePaths' + -> Complete LedgerDbFlavorArgs m -> Incomplete ChainDbArgs m blk -- ^ A set of incomplete arguments, possibly modified wrt @defaultArgs@ -> Complete ChainDbArgs m blk @@ -168,6 +172,7 @@ completeChainDbArgs checkIntegrity mkImmFS mkVolFS + flavorArgs defArgs = defArgs { cdbImmDbArgs = (cdbImmDbArgs defArgs) { @@ -186,6 +191,8 @@ completeChainDbArgs LedgerDB.lgrGenesis = pure initLedger , LedgerDB.lgrHasFS = mkVolFS $ RelativeMountPoint "ledger" , LedgerDB.lgrConfig = LedgerDB.configLedgerDb cdbsTopLevelConfig + , LedgerDB.lgrFlavorArgs = flavorArgs + , LedgerDB.lgrRegistry = registry } , cdbsArgs = (cdbsArgs defArgs) { cdbsRegistry = registry @@ -195,23 +202,23 @@ completeChainDbArgs } updateTracer :: - Tracer m (TraceEvent blk) + Tracer m (TraceEvent blk) -> ChainDbArgs f m blk -> ChainDbArgs f m blk updateTracer trcr args = args { cdbImmDbArgs = (cdbImmDbArgs args) { ImmutableDB.immTracer = TraceImmutableDBEvent >$< trcr } , cdbVolDbArgs = (cdbVolDbArgs args) { VolatileDB.volTracer = TraceVolatileDBEvent >$< trcr } - , cdbLgrDbArgs = (cdbLgrDbArgs args) { LedgerDB.lgrTracer = TraceSnapshotEvent >$< trcr } + , cdbLgrDbArgs = (cdbLgrDbArgs args) { LedgerDB.lgrTracer = TraceLedgerDBEvent >$< trcr } , cdbsArgs = (cdbsArgs args) { cdbsTracer = trcr } } -updateDiskPolicyArgs :: - DiskPolicyArgs +updateSnapshotPolicyArgs :: + SnapshotPolicyArgs -> ChainDbArgs f m blk -> ChainDbArgs f m blk -updateDiskPolicyArgs spa args = - args { cdbLgrDbArgs = (cdbLgrDbArgs args) { LedgerDB.lgrDiskPolicyArgs = spa } } +updateSnapshotPolicyArgs spa args = + args { cdbLgrDbArgs = (cdbLgrDbArgs args) { LedgerDB.lgrSnapshotPolicyArgs = spa } } {------------------------------------------------------------------------------- Relative mount points diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Background.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Background.hs index aab651ccb1..a4d858f7ca 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Background.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Background.hs @@ -6,6 +6,7 @@ {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TupleSections #-} -- | Background tasks: -- @@ -19,7 +20,6 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl.Background ( -- * Copying blocks from the VolatileDB to the ImmutableDB , copyAndSnapshotRunner , copyToImmutableDB - , updateLedgerSnapshots -- * Executing garbage collection , garbageCollect -- * Scheduling garbage collections @@ -53,7 +53,6 @@ import GHC.Stack (HasCallStack) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.HardFork.Abstract -import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Protocol.Abstract @@ -61,12 +60,9 @@ import 
Ouroboros.Consensus.Storage.ChainDB.API (AddBlockResult (..), BlockComponent (..)) import Ouroboros.Consensus.Storage.ChainDB.Impl.ChainSel (chainSelSync) -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB - (LgrDbSerialiseConstraints) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB import Ouroboros.Consensus.Storage.ChainDB.Impl.Types import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB -import Ouroboros.Consensus.Storage.LedgerDB (TimeSinceLast (..)) +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util import Ouroboros.Consensus.Util.Condense @@ -86,7 +82,6 @@ launchBgTasks :: , BlockSupportsDiffusionPipelining blk , InspectLedger blk , HasHardForkHistory blk - , LgrDbSerialiseConstraints blk ) => ChainDbEnv m blk -> Word64 -- ^ Number of immutable blocks replayed on ledger DB startup @@ -188,26 +183,31 @@ copyToImmutableDB CDB{..} = electric $ do -- happen if the precondition was satisfied. _ -> error "header to remove not on the current chain" +{------------------------------------------------------------------------------- + Snapshotting +-------------------------------------------------------------------------------} + -- | Copy blocks from the VolatileDB to ImmutableDB and take snapshots of the --- LgrDB +-- LedgerDB -- -- We watch the chain for changes. Whenever the chain is longer than @k@, then -- the headers older than @k@ are copied from the VolatileDB to the ImmutableDB -- (using 'copyToImmutableDB'). Once that is complete, -- --- * We periodically take a snapshot of the LgrDB (depending on its config). +-- * We periodically take a snapshot of the LedgerDB (depending on its config). -- When enough blocks (depending on its config) have been replayed during --- startup, a snapshot of the replayed LgrDB will be written to disk at the --- start of this function. --- NOTE: After this initial snapshot we do not take a snapshot of the LgrDB --- until the chain has changed again, irrespective of the LgrDB policy. +-- startup, a snapshot of the replayed LedgerDB will be written to disk at the +-- start of this function. NOTE: After this initial snapshot we do not take a +-- snapshot of the LedgerDB until the chain has changed again, irrespective of +-- the LedgerDB policy. +-- -- * Schedule GC of the VolatileDB ('scheduleGC') for the 'SlotNo' of the most -- recent block that was copied. -- --- It is important that we only take LgrDB snapshots when are are /sure/ they --- have been copied to the ImmutableDB, since the LgrDB assumes that all +-- It is important that we only take LedgerDB snapshots when we are /sure/ they +-- have been copied to the ImmutableDB, since the LedgerDB assumes that all -- snapshots correspond to immutable blocks. (Of course, data corruption can --- occur and we can handle it by reverting to an older LgrDB snapshot, but we +-- occur and we can handle it by reverting to an older LedgerDB snapshot, but we -- should need this only in exceptional circumstances.) -- -- We do not store any state of the VolatileDB GC. If the node shuts down before @@ -217,30 +217,26 @@ copyToImmutableDB CDB{..} = electric $ do copyAndSnapshotRunner :: forall m blk.
( IOLike m - , ConsensusProtocol (BlockProtocol blk) - , HasHeader blk - , GetHeader blk - , IsLedger (LedgerState blk) - , LgrDbSerialiseConstraints blk + , LedgerSupportsProtocol blk ) => ChainDbEnv m blk -> GcSchedule m -> Word64 -- ^ Number of immutable blocks replayed on ledger DB startup -> Fuse m -> m Void -copyAndSnapshotRunner cdb@CDB{..} gcSchedule replayed fuse = - if onDiskShouldTakeSnapshot NoSnapshotTakenYet replayed then do - updateLedgerSnapshots cdb - now <- getMonotonicTime - loop (TimeSinceLast now) 0 - else - loop NoSnapshotTakenYet replayed +copyAndSnapshotRunner cdb@CDB{..} gcSchedule replayed fuse = do + LedgerDB.tryFlush cdbLedgerDB + loop =<< LedgerDB.tryTakeSnapshot cdbLedgerDB Nothing replayed where - SecurityParam k = configSecurityParam cdbTopLevelConfig - LgrDB.DiskPolicy{..} = LgrDB.getDiskPolicy cdbLgrDB + SecurityParam k = configSecurityParam cdbTopLevelConfig + + loop :: LedgerDB.SnapCounters -> m Void + loop counters = do + let LedgerDB.SnapCounters { + prevSnapshotTime + , ntBlocksSinceLastSnap + } = counters - loop :: TimeSinceLast Time -> Word64 -> m Void - loop mPrevSnapshot distance = do -- Wait for the chain to grow larger than @k@ numToWrite <- atomically $ do curChain <- readTVar cdbChain @@ -253,15 +249,12 @@ copyAndSnapshotRunner cdb@CDB{..} gcSchedule replayed fuse = -- copied to disk (though not flushed, necessarily). withFuse fuse (copyToImmutableDB cdb) >>= scheduleGC' + LedgerDB.tryFlush cdbLedgerDB + now <- getMonotonicTime - let distance' = distance + numToWrite - elapsed = (\prev -> now `diffTime` prev) <$> mPrevSnapshot + let ntBlocksSinceLastSnap' = ntBlocksSinceLastSnap + numToWrite - if onDiskShouldTakeSnapshot elapsed distance' then do - updateLedgerSnapshots cdb - loop (TimeSinceLast now) 0 - else - loop mPrevSnapshot distance' + loop =<< LedgerDB.tryTakeSnapshot cdbLedgerDB ((,now) <$> prevSnapshotTime) ntBlocksSinceLastSnap' scheduleGC' :: WithOrigin SlotNo -> m () scheduleGC' Origin = return () @@ -275,19 +268,6 @@ copyAndSnapshotRunner cdb@CDB{..} gcSchedule replayed fuse = } gcSchedule --- | Write a snapshot of the LedgerDB to disk and remove old snapshots --- (typically one) so that only 'onDiskNumSnapshots' snapshots are on disk. -updateLedgerSnapshots :: - ( IOLike m - , LgrDbSerialiseConstraints blk - , HasHeader blk - , IsLedger (LedgerState blk) - ) - => ChainDbEnv m blk -> m () -updateLedgerSnapshots CDB{..} = do - void $ LgrDB.takeSnapshot cdbLgrDB - void $ LgrDB.trimSnapshots cdbLgrDB - {------------------------------------------------------------------------------- Executing garbage collection -------------------------------------------------------------------------------} @@ -311,7 +291,7 @@ garbageCollect :: forall m blk. IOLike m => ChainDbEnv m blk -> SlotNo -> m () garbageCollect CDB{..} slotNo = do VolatileDB.garbageCollect cdbVolatileDB slotNo atomically $ do - LgrDB.garbageCollectPrevApplied cdbLgrDB slotNo + LedgerDB.garbageCollect cdbLedgerDB slotNo modifyTVar cdbInvalid $ fmap $ Map.filter ((>= slotNo) . 
invalidBlockSlotNo) traceWith cdbTracer $ TraceGCEvent $ PerformedGC slotNo diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs index 2a25cfcdd1..8441c13f70 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/ChainSel.hs @@ -25,6 +25,7 @@ import Control.Monad (forM, forM_, when) import Control.Monad.Except () import Control.Monad.Trans.Class (lift) import Control.Monad.Trans.State.Strict +import Control.ResourceRegistry (ResourceRegistry, withRegistry) import Control.Tracer (Tracer, nullTracer, traceWith) import Data.Foldable (for_) import Data.Function (on) @@ -63,9 +64,6 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunis import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache (BlockCache) import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache as BlockCache -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB (LedgerDB', - LgrDB) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB import Ouroboros.Consensus.Storage.ChainDB.Impl.Paths (LookupBlockInfo) import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Paths as Paths @@ -73,6 +71,9 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Query as Query import Ouroboros.Consensus.Storage.ChainDB.Impl.Types import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB +import Ouroboros.Consensus.Storage.LedgerDB (AnnLedgerError (..), + Forker', LedgerDB', ValidateResult (..)) +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import Ouroboros.Consensus.Storage.VolatileDB (VolatileDB) import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util @@ -84,6 +85,7 @@ import Ouroboros.Network.AnchoredFragment (Anchor, AnchoredFragment, AnchoredSeq (..)) import qualified Ouroboros.Network.AnchoredFragment as AF import qualified Ouroboros.Network.AnchoredSeq as AS +import Ouroboros.Network.Protocol.LocalStateQuery.Type (Target (..)) -- | Perform the initial chain selection based on the tip of the ImmutableDB -- and the contents of the VolatileDB. @@ -98,13 +100,14 @@ initialChainSelection :: ) => ImmutableDB m blk -> VolatileDB m blk - -> LgrDB m blk + -> LedgerDB' m blk + -> ResourceRegistry m -> Tracer m (TraceInitChainSelEvent blk) -> TopLevelConfig blk -> StrictTVar m (WithFingerprint (InvalidBlocks blk)) -> LoE () - -> m (ChainAndLedger blk) -initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid + -> m (ChainAndLedger m blk) +initialChainSelection immutableDB volatileDB lgrDB rr tracer cfg varInvalid loE = do -- TODO: Improve the user experience by trimming any potential -- blocks from the future from the VolatileDB. @@ -125,26 +128,34 @@ initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid -- and a node operator can correct the problem by either wiping -- out the VolatileDB or waiting enough time until the blocks are -- not from the **far** future anymore. 
- (i :: Anchor blk, succsOf, ledger) <- atomically $ do + (i :: Anchor blk, succsOf) <- atomically $ do invalid <- forgetFingerprint <$> readTVar varInvalid - (,,) + (,) <$> ImmutableDB.getTipAnchor immutableDB <*> (ignoreInvalidSuc volatileDB invalid <$> VolatileDB.filterByPredecessor volatileDB) - <*> LgrDB.getCurrent lgrDB + + -- This is safe: the LedgerDB tip doesn't change in between the previous + -- atomically block and this call to 'withTipForker'. + -- + -- We don't use 'LedgerDB.withTipForker' here, because 'curForker' might be + -- returned as part of the selected chain. + curForker <- LedgerDB.getForkerAtWellKnownPoint lgrDB rr VolatileTip chains <- constructChains i succsOf -- We use the empty fragment anchored at @i@ as the current chain (and -- ledger) and the default in case there is no better candidate. let curChain = Empty (AF.castAnchor i) - curChainAndLedger = VF.ValidatedFragment curChain ledger + curChainAndLedger <- VF.newM curChain curForker case NE.nonEmpty (filter (preferAnchoredCandidate bcfg curChain) chains) of -- If there are no candidates, no chain selection is needed Nothing -> return curChainAndLedger - Just chains' -> maybe curChainAndLedger toChainAndLedger <$> - chainSelection' curChainAndLedger chains' + Just chains' -> + chainSelection' curChainAndLedger chains' >>= \case + Nothing -> pure curChainAndLedger + Just newChain -> LedgerDB.forkerClose curForker >> toChainAndLedger newChain where bcfg :: BlockConfig blk bcfg = configBlock cfg @@ -158,13 +169,13 @@ initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid -- This is guaranteed by the fact that all constructed candidates start -- from this tip. toChainAndLedger - :: ValidatedChainDiff (Header blk) (LedgerDB' blk) - -> ChainAndLedger blk + :: ValidatedChainDiff (Header blk) (Forker' m blk) + -> m (ChainAndLedger m blk) toChainAndLedger (ValidatedChainDiff chainDiff ledger) = case chainDiff of ChainDiff rollback suffix | rollback == 0 - -> VF.ValidatedFragment suffix ledger + -> VF.newM suffix ledger | otherwise -> error "constructed an initial chain with rollback" @@ -207,19 +218,18 @@ initialChainSelection immutableDB volatileDB lgrDB tracer cfg varInvalid -- PRECONDITION: all candidates must be preferred over the current chain. chainSelection' :: HasCallStack - => ChainAndLedger blk + => ChainAndLedger m blk -- ^ The current chain and ledger, corresponding to -- @i@. -> NonEmpty (AnchoredFragment (Header blk)) -- ^ Candidates anchored at @i@ - -> m (Maybe (ValidatedChainDiff (Header blk) (LedgerDB' blk))) + -> m (Maybe (ValidatedChainDiff (Header blk) (Forker' m blk))) chainSelection' curChainAndLedger candidates = - assert (all ((LgrDB.currentPoint ledger ==) . - castPoint . AF.anchorPoint) - candidates) $ + atomically (LedgerDB.forkerCurrentPoint ledger) >>= \curpt -> + assert (all ((curpt ==) . castPoint . 
AF.anchorPoint) candidates) $ assert (all (preferAnchoredCandidate bcfg curChain) candidates) $ do cse <- chainSelEnv - chainSelection cse (Diff.extend <$> candidates) + chainSelection cse rr (Diff.extend <$> candidates) where curChain = VF.validatedFragment curChainAndLedger ledger = VF.validatedLedger curChainAndLedger @@ -485,94 +495,91 @@ chainSelectionForBlock :: -> Header blk -> InvalidBlockPunishment m -> Electric m () -chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do - (invalid, succsOf', lookupBlockInfo, lookupBlockInfo', curChain, tipPoint, ledgerDB) - <- atomically $ do - (invalid, succsOf, lookupBlockInfo, curChain, tipPoint, ledgerDB) <- - (,,,,,) - <$> (forgetFingerprint <$> readTVar cdbInvalid) - <*> VolatileDB.filterByPredecessor cdbVolatileDB - <*> VolatileDB.getBlockInfo cdbVolatileDB - <*> Query.getCurrentChain cdb - <*> Query.getTipPoint cdb - <*> LgrDB.getCurrent cdbLgrDB +chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ withRegistry $ \rr -> do + (invalid, succsOf, lookupBlockInfo, curChain, tipPoint) + <- atomically $ (,,,,) + <$> (forgetFingerprint <$> readTVar cdbInvalid) + <*> VolatileDB.filterByPredecessor cdbVolatileDB + <*> VolatileDB.getBlockInfo cdbVolatileDB + <*> Query.getCurrentChain cdb + <*> Query.getTipPoint cdb + -- This is safe: the LedgerDB tip doesn't change in between the previous + -- atomically block and this call to 'withTipForker'. + LedgerDB.withTipForker cdbLedgerDB rr $ \curForker -> do + curChainAndLedger :: ChainAndLedger m blk <- + -- The current chain we're working with here is not longer than @k@ + -- blocks (see 'getCurrentChain' and 'cdbChain'), which is easier to + -- reason about when doing chain selection, etc. + assert (fromIntegral (AF.length curChain) <= k) $ + VF.newM curChain curForker + + let + immBlockNo :: WithOrigin BlockNo + immBlockNo = AF.anchorBlockNo curChain -- Let these two functions ignore invalid blocks - let lookupBlockInfo' = ignoreInvalid cdb invalid lookupBlockInfo - succsOf' = ignoreInvalidSuc cdb invalid succsOf - - pure (invalid, succsOf', lookupBlockInfo, lookupBlockInfo', curChain, tipPoint, ledgerDB) - - let curChainAndLedger :: ChainAndLedger blk - curChainAndLedger = - -- The current chain we're working with here is not longer than @k@ - -- blocks (see 'getCurrentChain' and 'cdbChain'), which is easier to - -- reason about when doing chain selection, etc. - assert (fromIntegral (AF.length curChain) <= k) $ - VF.ValidatedFragment curChain ledgerDB - - immBlockNo :: WithOrigin BlockNo - immBlockNo = AF.anchorBlockNo curChain - - -- The preconditions - assert (isJust $ lookupBlockInfo (headerHash hdr)) $ return () - - let - -- Trim the LoE fragment to be anchored in the immutable tip, ie the - -- anchor of @curChain@. In particular, this establishes the property that - -- it intersects with the current chain. - sanitizeLoEFrag loeFrag0 = - case AF.splitAfterPoint loeFrag0 (AF.anchorPoint curChain) of - Just (_, frag) -> frag - -- As the (unsanitized) LoE fragment is rooted in a recent immutable - -- tip, this case means that it doesn't intersect with the current - -- chain. This can temporarily be the case; we are conservative and - -- use the empty fragment anchored at the immutable tip for chain - -- selection. 
- Nothing -> AF.Empty (AF.anchor curChain) - - loeFrag <- fmap sanitizeLoEFrag <$> cdbLoE - - traceWith addBlockTracer (ChainSelectionLoEDebug curChain loeFrag) - - if - -- The chain might have grown since we added the block such that the - -- block is older than @k@. - | olderThanK hdr isEBB immBlockNo -> do - traceWith addBlockTracer $ IgnoreBlockOlderThanK p - - -- The block is invalid - | Just (InvalidBlockInfo reason _) <- Map.lookup (headerHash hdr) invalid -> do - traceWith addBlockTracer $ IgnoreInvalidBlock p reason - - -- We wouldn't know the block is invalid if its prefix was invalid, - -- hence 'InvalidBlockPunishment.BlockItself'. - InvalidBlockPunishment.enact - punish - InvalidBlockPunishment.BlockItself - - -- The block fits onto the end of our current chain - | pointHash tipPoint == headerPrevHash hdr -> do - -- ### Add to current chain - traceWith addBlockTracer (TryAddToCurrentChain p) - addToCurrentChain succsOf' curChainAndLedger loeFrag - - -- The block is reachable from the current selection - -- and it doesn't fit after the current selection - | Just diff <- Paths.isReachable lookupBlockInfo' curChain p -> do - -- ### Switch to a fork - traceWith addBlockTracer (TrySwitchToAFork p diff) - switchToAFork succsOf' lookupBlockInfo' curChainAndLedger loeFrag diff - - -- We cannot reach the block from the current selection - | otherwise -> do - -- ### Store but don't change the current chain - traceWith addBlockTracer (StoreButDontChange p) - - -- Note that we may have extended the chain, but have not trimmed it to - -- @k@ blocks/headers. That is the job of the background thread, which - -- will first copy the blocks/headers to trim (from the end of the - -- fragment) from the VolatileDB to the ImmutableDB. + lookupBlockInfo' = ignoreInvalid cdb invalid lookupBlockInfo + succsOf' = ignoreInvalidSuc cdb invalid succsOf + + -- The preconditions + assert (isJust $ lookupBlockInfo (headerHash hdr)) $ return () + + let + -- Trim the LoE fragment to be anchored in the immutable tip, ie the + -- anchor of @curChain@. In particular, this establishes the property that + -- it intersects with the current chain. + sanitizeLoEFrag loeFrag0 = + case AF.splitAfterPoint loeFrag0 (AF.anchorPoint curChain) of + Just (_, frag) -> frag + -- As the (unsanitized) LoE fragment is rooted in a recent immutable + -- tip, this case means that it doesn't intersect with the current + -- chain. This can temporarily be the case; we are conservative and + -- use the empty fragment anchored at the immutable tip for chain + -- selection. + Nothing -> AF.Empty (AF.anchor curChain) + + loeFrag <- fmap sanitizeLoEFrag <$> cdbLoE + + traceWith addBlockTracer (ChainSelectionLoEDebug curChain loeFrag) + + if + -- The chain might have grown since we added the block such that the + -- block is older than @k@. + | olderThanK hdr isEBB immBlockNo -> do + traceWith addBlockTracer $ IgnoreBlockOlderThanK p + + -- The block is invalid + | Just (InvalidBlockInfo reason _) <- Map.lookup (headerHash hdr) invalid -> do + traceWith addBlockTracer $ IgnoreInvalidBlock p reason + + -- We wouldn't know the block is invalid if its prefix was invalid, + -- hence 'InvalidBlockPunishment.BlockItself'. 
+ InvalidBlockPunishment.enact + punish + InvalidBlockPunishment.BlockItself + + -- The block fits onto the end of our current chain + | pointHash tipPoint == headerPrevHash hdr -> do + -- ### Add to current chain + traceWith addBlockTracer (TryAddToCurrentChain p) + addToCurrentChain rr succsOf' curChainAndLedger loeFrag + + -- The block is reachable from the current selection + -- and it doesn't fit after the current selection + | Just diff <- Paths.isReachable lookupBlockInfo' curChain p -> do + -- ### Switch to a fork + traceWith addBlockTracer (TrySwitchToAFork p diff) + switchToAFork rr succsOf' lookupBlockInfo' curChainAndLedger loeFrag diff + + -- We cannot reach the block from the current selection + | otherwise -> do + -- ### Store but don't change the current chain + traceWith addBlockTracer (StoreButDontChange p) + + -- Note that we may have extended the chain, but have not trimmed it to + -- @k@ blocks/headers. That is the job of the background thread, which + -- will first copy the blocks/headers to trim (from the end of the + -- fragment) from the VolatileDB to the ImmutableDB. where SecurityParam k = configSecurityParam cdbTopLevelConfig @@ -585,9 +592,9 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do addBlockTracer :: Tracer m (TraceAddBlockEvent blk) addBlockTracer = TraceAddBlockEvent >$< cdbTracer - mkChainSelEnv :: ChainAndLedger blk -> ChainSelEnv m blk + mkChainSelEnv :: ChainAndLedger m blk -> ChainSelEnv m blk mkChainSelEnv curChainAndLedger = ChainSelEnv - { lgrDB = cdbLgrDB + { lgrDB = cdbLedgerDB , bcfg = configBlock cdbTopLevelConfig , varInvalid = cdbInvalid , varTentativeState = cdbTentativeState @@ -608,13 +615,14 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- the current chain. addToCurrentChain :: HasCallStack - => (ChainHash blk -> Set (HeaderHash blk)) - -> ChainAndLedger blk + => ResourceRegistry m + -> (ChainHash blk -> Set (HeaderHash blk)) + -> ChainAndLedger m blk -- ^ The current chain and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment -> m () - addToCurrentChain succsOf curChainAndLedger loeFrag = do + addToCurrentChain rr succsOf curChainAndLedger loeFrag = do -- Extensions of @B@ that do not exceed the LoE let suffixesAfterB = Paths.maximalCandidates succsOf Nothing (realPointToPoint p) @@ -624,7 +632,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- If there are no suffixes after @b@, just use the suffix just -- containing @b@ as the sole candidate. Nothing -> - return $ (AF.fromOldestFirst curHead [hdr]) NE.:| [] + return $ AF.fromOldestFirst curHead [hdr] NE.:| [] Just suffixesAfterB' -> -- We can start with an empty cache, because we're only looking -- up the headers /after/ b, so they won't be on the current @@ -655,7 +663,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do case chainDiffs of Nothing -> return () Just chainDiffs' -> - chainSelection chainSelEnv chainDiffs' >>= \case + chainSelection chainSelEnv rr chainDiffs' >>= \case Nothing -> return () Just validatedChainDiff -> @@ -690,7 +698,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- 2. The LoE fragment intersects with the current selection. 
trimToLoE :: LoE (AnchoredFragment (Header blk)) -> - ChainAndLedger blk -> + ChainAndLedger m blk -> ChainDiff (Header blk) -> ChainDiff (Header blk) trimToLoE LoEDisabled _ diff = diff @@ -716,16 +724,17 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- a new fork. switchToAFork :: HasCallStack - => (ChainHash blk -> Set (HeaderHash blk)) + => ResourceRegistry m + -> (ChainHash blk -> Set (HeaderHash blk)) -> LookupBlockInfo blk - -> ChainAndLedger blk + -> ChainAndLedger m blk -- ^ The current chain (anchored at @i@) and ledger -> LoE (AnchoredFragment (Header blk)) -- ^ LoE fragment -> ChainDiff (HeaderFields blk) -- ^ Header fields for @(x,b]@ -> m () - switchToAFork succsOf lookupBlockInfo curChainAndLedger loeFrag diff = do + switchToAFork rr succsOf lookupBlockInfo curChainAndLedger loeFrag diff = do -- We use a cache to avoid reading the headers from disk multiple -- times in case they're part of multiple forks that go through @b@. let initCache = Map.singleton (headerHash hdr) hdr @@ -761,7 +770,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- No candidates preferred over the current chain Nothing -> return () Just chainDiffs' -> - chainSelection chainSelEnv chainDiffs' >>= \case + chainSelection chainSelEnv rr chainDiffs' >>= \case Nothing -> return () Just validatedChainDiff -> @@ -776,9 +785,9 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do mkSelectionChangedInfo :: AnchoredFragment (Header blk) -- ^ old chain -> AnchoredFragment (Header blk) -- ^ new chain - -> LedgerDB' blk -- ^ new LedgerDB + -> ExtLedgerState blk EmptyMK -- ^ new tip -> SelectionChangedInfo blk - mkSelectionChangedInfo oldChain newChain newLedgerDB = + mkSelectionChangedInfo oldChain newChain newTip = SelectionChangedInfo { newTipPoint = castRealPoint tipPoint , newTipEpoch = tipEpoch @@ -793,8 +802,8 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do cfg :: TopLevelConfig blk cfg = cdbTopLevelConfig - ledger :: LedgerState blk - ledger = ledgerState (LgrDB.ledgerDbCurrent newLedgerDB) + ledger :: LedgerState blk EmptyMK + ledger = ledgerState newTip summary :: History.Summary (HardForkIndices blk) summary = hardForkSummary @@ -811,7 +820,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do in (blockRealPoint tipHdr, tipEpochData, sv) -- | Try to apply the given 'ChainDiff' on the current chain fragment. The - -- 'LgrDB.LedgerDB' is updated in the same transaction. + -- 'LedgerDB' is updated in the same transaction. -- -- Note that we /cannot/ have switched to a different current chain in the -- meantime, since this function will only be called by a single @@ -823,7 +832,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do -- us, as we cannot roll back more than @k@ headers anyway. switchTo :: HasCallStack - => ValidatedChainDiff (Header blk) (LedgerDB' blk) + => ValidatedChainDiff (Header blk) (Forker' m blk) -- ^ Chain and ledger to switch to -> StrictTVar m (StrictMaybe (Header blk)) -- ^ Tentative header @@ -836,23 +845,23 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do $ AF.headPoint $ getSuffix $ getChainDiff vChainDiff - (curChain, newChain, events, prevTentativeHeader) <- atomically $ do + (curChain, newChain, events, prevTentativeHeader, newLedger) <- atomically $ do curChain <- readTVar cdbChain -- Not Query.getCurrentChain! 
- curLedger <- LgrDB.getCurrent cdbLgrDB + curLedger <- LedgerDB.getVolatileTip cdbLedgerDB + newLedger <- LedgerDB.forkerGetLedgerState newForker case Diff.apply curChain chainDiff of -- Impossible, as described in the docstring Nothing -> error "chainDiff doesn't fit onto current chain" Just newChain -> do writeTVar cdbChain newChain - LgrDB.setCurrent cdbLgrDB newLedger - + LedgerDB.forkerCommit newForker -- Inspect the new ledger for potential problems let events :: [LedgerEvent blk] events = inspectLedger cdbTopLevelConfig - (ledgerState $ LgrDB.ledgerDbCurrent curLedger) - (ledgerState $ LgrDB.ledgerDbCurrent newLedger) + (ledgerState curLedger) + (ledgerState newLedger) -- Clear the tentative header prevTentativeHeader <- swapTVar varTentativeHeader SNothing @@ -873,8 +882,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do followerHandles <- Map.elems <$> readTVar cdbFollowers forM_ followerHandles $ switchFollowerToFork curChain newChain ipoint - return (curChain, newChain, events, prevTentativeHeader) - + return (curChain, newChain, events, prevTentativeHeader, newLedger) let mkTraceEvent = case chainSwitchType of AddingBlocks -> AddedToCurrentChain SwitchingToAFork -> SwitchedToAFork @@ -884,6 +892,8 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do whenJust (strictMaybeToMaybe prevTentativeHeader) $ traceWith $ PipeliningEvent . OutdatedTentativeHeader >$< addBlockTracer + LedgerDB.forkerClose newForker + where -- Given the current chain and the new chain as chain fragments, and the -- intersection point (an optimization, since it has already been @@ -896,7 +906,7 @@ chainSelectionForBlock cdb@CDB{..} blockCache hdr punish = electric $ do in assert (AF.withinFragmentBounds (castPoint ipoint) newChain) $ \followerHandle -> fhSwitchFork followerHandle ipoint oldPoints - ValidatedChainDiff chainDiff newLedger = vChainDiff + ValidatedChainDiff chainDiff newForker = vChainDiff -- | We have a new block @b@ that doesn't fit onto the current chain, but -- we have found a 'ChainDiff' connecting it to the current chain via @@ -936,7 +946,7 @@ getKnownHeaderThroughCache volatileDB hash = gets (Map.lookup hash) >>= \case -- | Environment used by 'chainSelection' and related functions. data ChainSelEnv m blk = ChainSelEnv - { lgrDB :: LgrDB m blk + { lgrDB :: LedgerDB' m blk , validationTracer :: Tracer m (TraceValidationEvent blk) , pipeliningTracer :: Tracer m (TracePipeliningEvent blk) , bcfg :: BlockConfig blk @@ -945,7 +955,7 @@ data ChainSelEnv m blk = ChainSelEnv , varTentativeHeader :: StrictTVar m (StrictMaybe (Header blk)) , getTentativeFollowers :: STM m [FollowerHandle m blk] , blockCache :: BlockCache blk - , curChainAndLedger :: ChainAndLedger blk + , curChainAndLedger :: ChainAndLedger m blk -- | The block that this chain selection invocation is processing, and the -- punish action for the peer that sent that block; see -- 'InvalidBlockPunishment'. @@ -981,12 +991,13 @@ chainSelection :: , HasCallStack ) => ChainSelEnv m blk + -> ResourceRegistry m -> NonEmpty (ChainDiff (Header blk)) - -> m (Maybe (ValidatedChainDiff (Header blk) (LedgerDB' blk))) + -> m (Maybe (ValidatedChainDiff (Header blk) (Forker' m blk))) -- ^ The (valid) chain diff and corresponding LedgerDB that was selected, -- or 'Nothing' if there is no valid chain diff preferred over the current -- chain. -chainSelection chainSelEnv chainDiffs = +chainSelection chainSelEnv rr chainDiffs = assert (all (preferAnchoredCandidate bcfg curChain . 
Diff.getSuffix) chainDiffs) $ assert (all (isJust . Diff.apply curChain) @@ -1011,11 +1022,11 @@ chainSelection chainSelEnv chainDiffs = -- [Ouroboros] below. go :: [ChainDiff (Header blk)] - -> m (Maybe (ValidatedChainDiff (Header blk) (LedgerDB' blk))) + -> m (Maybe (ValidatedChainDiff (Header blk) (Forker' m blk))) go [] = return Nothing go (candidate:candidates0) = do mTentativeHeader <- setTentativeHeader - validateCandidate chainSelEnv candidate >>= \case + validateCandidate chainSelEnv rr candidate >>= \case InsufficientSuffix -> -- When the body of the tentative block turns out to be invalid, we -- have a valid *empty* prefix, as the tentative header fits on top @@ -1120,9 +1131,9 @@ chainSelection chainSelEnv chainDiffs = -- peer's valid chain. -- | Result of 'validateCandidate'. -data ValidationResult blk = +data ValidationResult m blk = -- | The entire candidate fragment was valid. - FullyValid (ValidatedChainDiff (Header blk) (LedgerDB' blk)) + FullyValid (ValidatedChainDiff (Header blk) (Forker' m blk)) -- | The candidate fragment contained invalid blocks that had to -- be truncated from the fragment. @@ -1147,6 +1158,9 @@ data ValidationResult blk = -- If a block in the fragment is invalid, then the fragment in the returned -- 'ValidatedChainDiff' is a prefix of the given candidate chain diff (upto -- the last valid block). +-- +-- Note that this function returns a 'Forker', and that this forker should be +-- closed when it is no longer used! ledgerValidateCandidate :: forall m blk. ( IOLike m @@ -1154,19 +1168,20 @@ ledgerValidateCandidate :: , HasCallStack ) => ChainSelEnv m blk + -> ResourceRegistry m -> ChainDiff (Header blk) - -> m (ValidatedChainDiff (Header blk) (LedgerDB' blk)) -ledgerValidateCandidate chainSelEnv chainDiff@(ChainDiff rollback suffix) = - LgrDB.validate lgrDB curLedger blockCache rollback traceUpdate newBlocks >>= \case - LgrDB.ValidateExceededRollBack {} -> - -- Impossible: we asked the LgrDB to roll back past the immutable tip, - -- which is impossible, since the candidates we construct must connect - -- to the immutable tip. + -> m (ValidatedChainDiff (Header blk) (Forker' m blk)) +ledgerValidateCandidate chainSelEnv rr chainDiff@(ChainDiff rollback suffix) = + LedgerDB.validate lgrDB rr traceUpdate blockCache rollback newBlocks >>= \case + ValidateExceededRollBack {} -> + -- Impossible: we asked the LedgerDB to roll back past the immutable + -- tip, which is impossible, since the candidates we construct must + -- connect to the immutable tip. error "found candidate requiring rolling back past the immutable tip" - LgrDB.ValidateLedgerError (LgrDB.AnnLedgerError ledger' pt e) -> do - let lastValid = LgrDB.currentPoint ledger' - chainDiff' = Diff.truncate (castPoint lastValid) chainDiff + ValidateLedgerError (AnnLedgerError ledger' pt e) -> do + lastValid <- atomically $ LedgerDB.forkerCurrentPoint ledger' + let chainDiff' = Diff.truncate (castPoint lastValid) chainDiff traceWith validationTracer (InvalidBlock e pt) addInvalidBlock e pt traceWith validationTracer (ValidCandidate (Diff.getSuffix chainDiff')) @@ -1195,16 +1210,15 @@ ledgerValidateCandidate chainSelEnv chainDiff@(ChainDiff rollback suffix) = -- we should punish. (Tacit assumption made here: it's impossible -- three blocks in a row have the same slot.) 
- return $ ValidatedDiff.new chainDiff' ledger' + ValidatedDiff.newM chainDiff' ledger' - LgrDB.ValidateSuccessful ledger' -> do + ValidateSuccessful ledger' -> do traceWith validationTracer (ValidCandidate suffix) - return $ ValidatedDiff.new chainDiff ledger' + ValidatedDiff.newM chainDiff ledger' where ChainSelEnv { lgrDB , validationTracer - , curChainAndLedger , blockCache , varInvalid , punish @@ -1212,9 +1226,6 @@ ledgerValidateCandidate chainSelEnv chainDiff@(ChainDiff rollback suffix) = traceUpdate = traceWith $ UpdateLedgerDbTraceEvent >$< validationTracer - curLedger :: LedgerDB' blk - curLedger = VF.validatedLedger curChainAndLedger - newBlocks :: [Header blk] newBlocks = AF.toOldestFirst suffix @@ -1233,13 +1244,14 @@ validateCandidate :: , HasCallStack ) => ChainSelEnv m blk + -> ResourceRegistry m -> ChainDiff (Header blk) - -> m (ValidationResult blk) -validateCandidate chainSelEnv chainDiff = - ledgerValidateCandidate chainSelEnv chainDiff >>= \case + -> m (ValidationResult m blk) +validateCandidate chainSelEnv rr chainDiff = + ledgerValidateCandidate chainSelEnv rr chainDiff >>= \case validatedChainDiff | ValidatedDiff.rollbackExceedsSuffix validatedChainDiff - -> return InsufficientSuffix + -> cleanup validatedChainDiff >> return InsufficientSuffix | AF.length (Diff.getSuffix chainDiff) == AF.length (Diff.getSuffix chainDiff') -- No truncation @@ -1253,13 +1265,19 @@ validateCandidate chainSelEnv chainDiff = where chainDiff' = ValidatedDiff.getChainDiff validatedChainDiff + where + -- If this function does not return a validated chain diff, then there is a + -- leftover forker that we have to close so that its resources are correctly + -- released. + cleanup :: ValidatedChainDiff b (Forker' m blk) -> m () + cleanup = LedgerDB.forkerClose . getLedger {------------------------------------------------------------------------------- 'ChainAndLedger' -------------------------------------------------------------------------------} -- | Instantiate 'ValidatedFragment' in the way that chain selection requires. -type ChainAndLedger blk = ValidatedFragment (Header blk) (LedgerDB' blk) +type ChainAndLedger m blk = ValidatedFragment (Header blk) (Forker' m blk) {------------------------------------------------------------------------------- Diffusion pipelining diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/LgrDB.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/LgrDB.hs deleted file mode 100644 index e27c047e46..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/LgrDB.hs +++ /dev/null @@ -1,397 +0,0 @@ -{-# LANGUAGE ConstraintKinds #-} -{-# LANGUAGE DeriveAnyClass #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE LambdaCase #-} -{-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE StandaloneDeriving #-} -{-# LANGUAGE TypeApplications #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE TypeOperators #-} - --- | Thin wrapper around the LedgerDB -module Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB ( - LgrDB - -- opaque - , LedgerDB' - , LgrDbSerialiseConstraints - -- * Initialization - , LgrDbArgs (..) 
- , defaultArgs - , openDB - -- * 'TraceReplayEvent' decorator - , LedgerDB.decorateReplayTracerWithGoal - -- * Wrappers - , currentPoint - , getCurrent - , getDiskPolicy - , setCurrent - , takeSnapshot - , trimSnapshots - -- * Validation - , ValidateResult (..) - , validate - -- * Previously applied blocks - , garbageCollectPrevApplied - , getPrevApplied - -- * Re-exports - , LedgerDB.AnnLedgerError (..) - , LedgerDB.DiskPolicy (..) - , LedgerDB.DiskSnapshot - , LedgerDB.ExceededRollback (..) - , LedgerDB.TraceReplayEvent (..) - , LedgerDB.TraceSnapshotEvent (..) - , LedgerDB.ledgerDbCurrent - -- * Exported for testing purposes - , mkLgrDB - ) where - -import Codec.Serialise (Serialise (decode)) -import Control.Monad.Trans.Class -import Control.Tracer -import Data.Foldable as Foldable (foldl') -import Data.Set (Set) -import qualified Data.Set as Set -import Data.Word (Word64) -import GHC.Generics (Generic) -import GHC.Stack (HasCallStack) -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Config -import Ouroboros.Consensus.HeaderValidation -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Ledger.Inspect -import Ouroboros.Consensus.Ledger.SupportsProtocol -import Ouroboros.Consensus.Protocol.Abstract -import Ouroboros.Consensus.Storage.ChainDB.API (ChainDbFailure (..)) -import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache - (BlockCache) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache as BlockCache -import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB) -import Ouroboros.Consensus.Storage.ImmutableDB.Stream -import Ouroboros.Consensus.Storage.LedgerDB (LedgerDB') -import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB -import Ouroboros.Consensus.Storage.Serialisation -import Ouroboros.Consensus.Util.Args -import Ouroboros.Consensus.Util.IOLike -import System.FS.API (SomeHasFS (..), createDirectoryIfMissing) -import System.FS.API.Types (FsError, mkFsPath) - --- | Thin wrapper around the ledger database -data LgrDB m blk = LgrDB { - varDB :: !(StrictTVar m (LedgerDB' blk)) - -- ^ INVARIANT: the tip of the 'LedgerDB' is always in sync with the tip - -- of the current chain of the ChainDB. - , varPrevApplied :: !(StrictTVar m (Set (RealPoint blk))) - -- ^ INVARIANT: this set contains only points that are in the - -- VolatileDB. - -- - -- INVARIANT: all points on the current chain fragment are in this set. - -- - -- The VolatileDB might contain invalid blocks, these will not be in - -- this set. - -- - -- When a garbage-collection is performed on the VolatileDB, the points - -- of the blocks eligible for garbage-collection should be removed from - -- this set. - , resolveBlock :: !(RealPoint blk -> m blk) - -- ^ Read a block from disk - , cfg :: !(LedgerDB.LedgerDbCfg (ExtLedgerState blk)) - , diskPolicy :: !LedgerDB.DiskPolicy - , hasFS :: !(SomeHasFS m) - , tracer :: !(Tracer m (LedgerDB.TraceSnapshotEvent blk)) - } deriving (Generic) - -deriving instance (IOLike m, LedgerSupportsProtocol blk) - => NoThunks (LgrDB m blk) - -- use generic instance - --- | 'EncodeDisk' and 'DecodeDisk' constraints needed for the LgrDB. 
-type LgrDbSerialiseConstraints blk = - ( Serialise (HeaderHash blk) - , EncodeDisk blk (LedgerState blk) - , DecodeDisk blk (LedgerState blk) - , EncodeDisk blk (AnnTip blk) - , DecodeDisk blk (AnnTip blk) - , EncodeDisk blk (ChainDepState (BlockProtocol blk)) - , DecodeDisk blk (ChainDepState (BlockProtocol blk)) - ) - -{------------------------------------------------------------------------------- - Initialization --------------------------------------------------------------------------------} - -data LgrDbArgs f m blk = LgrDbArgs { - lgrDiskPolicyArgs :: LedgerDB.DiskPolicyArgs - , lgrGenesis :: HKD f (m (ExtLedgerState blk)) - , lgrHasFS :: HKD f (SomeHasFS m) - , lgrConfig :: HKD f (LedgerDB.LedgerDbCfg (ExtLedgerState blk)) - , lgrTracer :: Tracer m (LedgerDB.TraceSnapshotEvent blk) - } - --- | Default arguments -defaultArgs :: Applicative m => Incomplete LgrDbArgs m blk -defaultArgs = LgrDbArgs { - lgrDiskPolicyArgs = LedgerDB.defaultDiskPolicyArgs - , lgrGenesis = noDefault - , lgrHasFS = noDefault - , lgrConfig = noDefault - , lgrTracer = nullTracer - } - --- | Open the ledger DB --- --- In addition to the ledger DB also returns the number of immutable blocks --- that were replayed. -openDB :: forall m blk. - ( IOLike m - , LedgerSupportsProtocol blk - , LgrDbSerialiseConstraints blk - , InspectLedger blk - , HasCallStack - ) - => Complete LgrDbArgs m blk - -- ^ Stateless initializaton arguments - -> Tracer m (LedgerDB.ReplayGoal blk -> LedgerDB.TraceReplayEvent blk) - -- ^ Used to trace the progress while replaying blocks against the - -- ledger. - -> ImmutableDB m blk - -- ^ Reference to the immutable DB - -- - -- After reading a snapshot from disk, the ledger DB will be brought - -- up to date with tip of the immutable DB. The corresponding ledger - -- state can then be used as the starting point for chain selection in - -- the ChainDB driver. - -> (RealPoint blk -> m blk) - -- ^ Read a block from disk - -- - -- The block may be in the immutable DB or in the volatile DB; the ledger - -- DB does not know where the boundary is at any given point. - -> m (LgrDB m blk, Word64) -openDB args@LgrDbArgs { lgrHasFS = lgrHasFS@(SomeHasFS hasFS), .. } replayTracer immutableDB getBlock = do - createDirectoryIfMissing hasFS True (mkFsPath []) - (db, replayed) <- initFromDisk args replayTracer immutableDB - -- When initializing the ledger DB from disk we: - -- - -- - Look for the newest valid snapshot, say 'Lbs', which corresponds to the - -- application of a block in the immutable DB, say 'b'. - -- - -- - Push onto the ledger DB all the ledger states that result from applying - -- blocks found in the on-disk immutable DB, starting from the successor - -- of 'b'. - -- - -- The anchor of 'LedgerDB' must be the oldest point we can rollback to. So - -- if we follow the procedure described above (that 'initFromDisk' - -- implements), the newest ledger state in 'db', say 'Lbn' corresponds to - -- the most recent block in the immutable DB. If this block is in the - -- immutable DB, it means that at some point it was part of a chain that was - -- >k blocks long. Thus 'Lbn' is the oldest point we can roll back to. - -- Therefore, we need to make the newest state (current) of the ledger DB - -- the anchor. 
- let dbPrunedToImmDBTip = LedgerDB.ledgerDbPrune (SecurityParam 0) db - (varDB, varPrevApplied) <- - (,) <$> newTVarIO dbPrunedToImmDBTip <*> newTVarIO Set.empty - return ( - LgrDB { - varDB = varDB - , varPrevApplied = varPrevApplied - , resolveBlock = getBlock - , cfg = lgrConfig - , diskPolicy = let k = LedgerDB.ledgerDbCfgSecParam lgrConfig - in LedgerDB.mkDiskPolicy k lgrDiskPolicyArgs - , hasFS = lgrHasFS - , tracer = lgrTracer - } - , replayed - ) - -initFromDisk :: - forall blk m. - ( IOLike m - , LedgerSupportsProtocol blk - , LgrDbSerialiseConstraints blk - , InspectLedger blk - , HasCallStack - ) - => Complete LgrDbArgs m blk - -> Tracer m (LedgerDB.ReplayGoal blk -> LedgerDB.TraceReplayEvent blk) - -> ImmutableDB m blk - -> m (LedgerDB' blk, Word64) -initFromDisk LgrDbArgs { lgrHasFS = hasFS, .. } - replayTracer - immutableDB = wrapFailure (Proxy @blk) $ do - (_initLog, db, replayed) <- - LedgerDB.initLedgerDB - replayTracer - lgrTracer - hasFS - (decodeDiskExtLedgerState ccfg) - decode - lgrConfig - lgrGenesis - (streamAPI immutableDB) - return (db, replayed) - where - ccfg = configCodec $ getExtLedgerCfg $ LedgerDB.ledgerDbCfg lgrConfig - --- | For testing purposes -mkLgrDB :: StrictTVar m (LedgerDB' blk) - -> StrictTVar m (Set (RealPoint blk)) - -> (RealPoint blk -> m blk) - -> Complete LgrDbArgs m blk - -> SecurityParam - -> LgrDB m blk -mkLgrDB varDB varPrevApplied resolveBlock args k = LgrDB {..} - where - LgrDbArgs { - lgrConfig = cfg - , lgrDiskPolicyArgs = diskPolicyArgs - , lgrHasFS = hasFS - , lgrTracer = tracer - } = args - diskPolicy = LedgerDB.mkDiskPolicy k diskPolicyArgs - -{------------------------------------------------------------------------------- - Wrappers --------------------------------------------------------------------------------} - -getCurrent :: IOLike m => LgrDB m blk -> STM m (LedgerDB' blk) -getCurrent LgrDB{..} = readTVar varDB - --- | PRECONDITION: The new 'LedgerDB' must be the result of calling either --- 'LedgerDB.ledgerDbSwitch' or 'LedgerDB.ledgerDbPushMany' on the current --- 'LedgerDB'. -setCurrent :: IOLike m => LgrDB m blk -> LedgerDB' blk -> STM m () -setCurrent LgrDB{..} = writeTVar $! varDB - -currentPoint :: forall blk. UpdateLedger blk => LedgerDB' blk -> Point blk -currentPoint = castPoint - . ledgerTipPoint - . ledgerState - . LedgerDB.ledgerDbCurrent - -takeSnapshot :: - forall m blk. - ( IOLike m - , LgrDbSerialiseConstraints blk - , HasHeader blk - , IsLedger (LedgerState blk) - ) - => LgrDB m blk -> m (Maybe (LedgerDB.DiskSnapshot, RealPoint blk)) -takeSnapshot lgrDB@LgrDB{ cfg, tracer, hasFS } = wrapFailure (Proxy @blk) $ do - ledgerDB <- LedgerDB.ledgerDbAnchor <$> atomically (getCurrent lgrDB) - LedgerDB.takeSnapshot - tracer - hasFS - (encodeDiskExtLedgerState ccfg) - ledgerDB - where - ccfg = configCodec $ getExtLedgerCfg $ LedgerDB.ledgerDbCfg cfg - -trimSnapshots :: - forall m blk. 
(MonadCatch m, HasHeader blk) - => LgrDB m blk - -> m [LedgerDB.DiskSnapshot] -trimSnapshots LgrDB { diskPolicy, tracer, hasFS } = wrapFailure (Proxy @blk) $ - LedgerDB.trimSnapshots tracer hasFS diskPolicy - -getDiskPolicy :: LgrDB m blk -> LedgerDB.DiskPolicy -getDiskPolicy = diskPolicy - -{------------------------------------------------------------------------------- - Validation --------------------------------------------------------------------------------} - -data ValidateResult blk = - ValidateSuccessful (LedgerDB' blk) - | ValidateLedgerError (LedgerDB.AnnLedgerError' blk) - | ValidateExceededRollBack LedgerDB.ExceededRollback - -validate :: forall m blk. (IOLike m, LedgerSupportsProtocol blk, HasCallStack) - => LgrDB m blk - -> LedgerDB' blk - -- ^ This is used as the starting point for validation, not the one - -- in the 'LgrDB'. - -> BlockCache blk - -> Word64 -- ^ How many blocks to roll back - -> (LedgerDB.UpdateLedgerDbTraceEvent blk -> m ()) - -> [Header blk] - -> m (ValidateResult blk) -validate LgrDB{..} ledgerDB blockCache numRollbacks trace = \hdrs -> do - aps <- mkAps hdrs <$> atomically (readTVar varPrevApplied) - res <- fmap rewrap $ LedgerDB.defaultResolveWithErrors resolveBlock $ - LedgerDB.ledgerDbSwitch - cfg - numRollbacks - (lift . lift . trace) - aps - ledgerDB - atomically $ modifyTVar varPrevApplied $ - addPoints (validBlockPoints res (map headerRealPoint hdrs)) - return res - where - rewrap :: Either (LedgerDB.AnnLedgerError' blk) (Either LedgerDB.ExceededRollback (LedgerDB' blk)) - -> ValidateResult blk - rewrap (Left e) = ValidateLedgerError e - rewrap (Right (Left e)) = ValidateExceededRollBack e - rewrap (Right (Right l)) = ValidateSuccessful l - - mkAps :: forall n l. l ~ ExtLedgerState blk - => [Header blk] - -> Set (RealPoint blk) - -> [LedgerDB.Ap n l blk ( LedgerDB.ResolvesBlocks n blk - , LedgerDB.ThrowsLedgerError n l blk - )] - mkAps hdrs prevApplied = - [ case ( Set.member (headerRealPoint hdr) prevApplied - , BlockCache.lookup (headerHash hdr) blockCache - ) of - (False, Nothing) -> LedgerDB.ApplyRef (headerRealPoint hdr) - (True, Nothing) -> LedgerDB.Weaken $ LedgerDB.ReapplyRef (headerRealPoint hdr) - (False, Just blk) -> LedgerDB.Weaken $ LedgerDB.ApplyVal blk - (True, Just blk) -> LedgerDB.Weaken $ LedgerDB.ReapplyVal blk - | hdr <- hdrs - ] - - -- | Based on the 'ValidateResult', return the hashes corresponding to - -- valid blocks. - validBlockPoints :: ValidateResult blk -> [RealPoint blk] -> [RealPoint blk] - validBlockPoints = \case - ValidateExceededRollBack _ -> const [] - ValidateSuccessful _ -> id - ValidateLedgerError e -> takeWhile (/= LedgerDB.annLedgerErrRef e) - - addPoints :: [RealPoint blk] - -> Set (RealPoint blk) -> Set (RealPoint blk) - addPoints hs set = Foldable.foldl' (flip Set.insert) set hs - -{------------------------------------------------------------------------------- - Previously applied blocks --------------------------------------------------------------------------------} - -getPrevApplied :: IOLike m => LgrDB m blk -> STM m (Set (RealPoint blk)) -getPrevApplied LgrDB{..} = readTVar varPrevApplied - --- | Remove all points with a slot older than the given slot from the set of --- previously applied points. -garbageCollectPrevApplied :: IOLike m => LgrDB m blk -> SlotNo -> STM m () -garbageCollectPrevApplied LgrDB{..} slotNo = modifyTVar varPrevApplied $ - Set.dropWhileAntitone ((< slotNo) . 
realPointSlot) - -{------------------------------------------------------------------------------- - Error handling --------------------------------------------------------------------------------} - --- | Wrap exceptions that may indicate disk failure in a 'ChainDbFailure' --- exception using the 'LgrDbFailure' constructor. -wrapFailure :: - forall m x blk. (MonadCatch m, HasHeader blk) - => Proxy blk - -> m x - -> m x -wrapFailure _ k = catch k rethrow - where - rethrow :: FsError -> m x - rethrow err = throwIO $ LgrDbFailure @blk err diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Query.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Query.hs index 5bea8cd37c..5d9faba7e0 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Query.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Query.hs @@ -9,12 +9,17 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl.Query ( -- * Queries getBlockComponent , getCurrentChain + , getCurrentLedger , getHeaderStateHistory + , getImmutableLedger , getIsFetched , getIsInvalidBlock , getIsValid - , getLedgerDB + , getLedgerTablesAtFor , getMaxSlotNo + , getPastLedger + , getReadOnlyForkerAtPoint + , getStatistics , getTipBlock , getTipHeader , getTipPoint @@ -24,23 +29,22 @@ module Ouroboros.Consensus.Storage.ChainDB.Impl.Query ( , getAnyKnownBlockComponent ) where +import Control.ResourceRegistry (ResourceRegistry) import qualified Data.Map.Strict as Map import qualified Data.Set as Set import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config -import Ouroboros.Consensus.HardFork.Abstract (HasHardForkHistory (..)) -import Ouroboros.Consensus.HeaderStateHistory - (HeaderStateHistory (..), mkHeaderStateWithTimeFromSummary) -import Ouroboros.Consensus.HeaderValidation (HasAnnTip) -import Ouroboros.Consensus.Ledger.Abstract (IsLedger, LedgerState) +import Ouroboros.Consensus.HeaderStateHistory (HeaderStateHistory) +import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB.API (BlockComponent (..), ChainDbFailure (..)) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB import Ouroboros.Consensus.Storage.ChainDB.Impl.Types import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB +import Ouroboros.Consensus.Storage.LedgerDB (GetForkerError, + ReadOnlyForker', Statistics) import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB import Ouroboros.Consensus.Storage.VolatileDB (VolatileDB) import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB @@ -50,6 +54,7 @@ import Ouroboros.Consensus.Util.STM (WithFingerprint (..)) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import qualified Ouroboros.Network.AnchoredFragment as AF import Ouroboros.Network.Block (MaxSlotNo, maxSlotNoFromWithOrigin) +import Ouroboros.Network.Protocol.LocalStateQuery.Type -- | Return the last @k@ headers. -- @@ -79,35 +84,10 @@ getCurrentChain CDB{..} = where SecurityParam k = configSecurityParam cdbTopLevelConfig -getLedgerDB :: - IOLike m - => ChainDbEnv m blk -> STM m (LgrDB.LedgerDB' blk) -getLedgerDB CDB{..} = LgrDB.getCurrent cdbLgrDB - -- | Get a 'HeaderStateHistory' populated with the 'HeaderState's of the -- last @k@ blocks of the current chain. 
-getHeaderStateHistory :: - forall m blk. - ( IOLike m - , HasHardForkHistory blk - , HasAnnTip blk - , IsLedger (LedgerState blk) - ) - => ChainDbEnv m blk -> STM m (HeaderStateHistory blk) -getHeaderStateHistory cdb@CDB{cdbTopLevelConfig = cfg} = do - ledgerDb <- getLedgerDB cdb - let currentLedgerState = ledgerState $ LedgerDB.ledgerDbCurrent ledgerDb - -- This summary can convert all tip slots of the ledger states in the - -- @ledgerDb@ as these are not newer than the tip slot of the current - -- ledger state (Property 17.1 in the Consensus report). - summary = hardForkSummary (configLedger cfg) currentLedgerState - mkHeaderStateWithTime' = - mkHeaderStateWithTimeFromSummary summary - . headerState - pure - . HeaderStateHistory - . LedgerDB.ledgerDbBimap mkHeaderStateWithTime' mkHeaderStateWithTime' - $ ledgerDb +getHeaderStateHistory :: ChainDbEnv m blk -> STM m (HeaderStateHistory blk) +getHeaderStateHistory = LedgerDB.getHeaderStateHistory . cdbLedgerDB getTipBlock :: forall m blk. @@ -135,8 +115,8 @@ getTipHeader CDB{..} = do anchorOrHdr <- AF.head <$> atomically (readTVar cdbChain) case anchorOrHdr of Right hdr -> return $ Just hdr - Left anchor -> - case pointToWithOriginRealPoint (castPoint (AF.anchorToPoint anchor)) of + Left anch -> + case pointToWithOriginRealPoint (castPoint (AF.anchorToPoint anch)) of Origin -> return Nothing NotOrigin p -> -- In this case, the fragment is empty but the anchor point is not @@ -151,7 +131,7 @@ getTipPoint :: forall m blk. (IOLike m, HasHeader (Header blk)) => ChainDbEnv m blk -> STM m (Point blk) getTipPoint CDB{..} = - (castPoint . AF.headPoint) <$> readTVar cdbChain + castPoint . AF.headPoint <$> readTVar cdbChain getBlockComponent :: forall m blk b. IOLike m @@ -186,7 +166,7 @@ getIsValid :: => ChainDbEnv m blk -> STM m (RealPoint blk -> Maybe Bool) getIsValid CDB{..} = do - prevApplied <- LgrDB.getPrevApplied cdbLgrDB + prevApplied <- LedgerDB.getPrevApplied cdbLedgerDB invalid <- forgetFingerprint <$> readTVar cdbInvalid return $ \pt@(RealPoint _ hash) -> -- Blocks from the future that were valid according to the ledger but @@ -214,6 +194,46 @@ getMaxSlotNo CDB{..} = do volatileDbMaxSlotNo <- VolatileDB.getMaxSlotNo cdbVolatileDB return $ curChainMaxSlotNo `max` volatileDbMaxSlotNo +-- | Get current ledger +getCurrentLedger :: ChainDbEnv m blk -> STM m (ExtLedgerState blk EmptyMK) +getCurrentLedger CDB{..} = LedgerDB.getVolatileTip cdbLedgerDB + +-- | Get the immutable ledger, i.e., typically @k@ blocks back. +getImmutableLedger :: ChainDbEnv m blk -> STM m (ExtLedgerState blk EmptyMK) +getImmutableLedger CDB{..} = LedgerDB.getImmutableTip cdbLedgerDB + +-- | Get the ledger for the given point. +-- +-- When the given point is not among the last @k@ blocks of the current +-- chain (i.e., older than @k@ or not on the current chain), 'Nothing' is +-- returned. +getPastLedger :: + ChainDbEnv m blk + -> Point blk + -> STM m (Maybe (ExtLedgerState blk EmptyMK)) +getPastLedger CDB{..} = LedgerDB.getPastLedgerState cdbLedgerDB + +getReadOnlyForkerAtPoint :: + IOLike m + => ChainDbEnv m blk + -> ResourceRegistry m + -> Target (Point blk) + -> m (Either GetForkerError (ReadOnlyForker' m blk)) +getReadOnlyForkerAtPoint CDB{..} = LedgerDB.getReadOnlyForker cdbLedgerDB + +getLedgerTablesAtFor :: + IOLike m + => ChainDbEnv m blk + -> Point blk + -> LedgerTables (ExtLedgerState blk) KeysMK + -> m (Maybe (LedgerTables (ExtLedgerState blk) ValuesMK)) +getLedgerTablesAtFor = + (\ldb pt ks -> eitherToMaybe <$> LedgerDB.readLedgerTablesAtFor ldb pt ks) + . 
cdbLedgerDB + +getStatistics :: IOLike m => ChainDbEnv m blk -> m (Maybe Statistics) +getStatistics CDB{..} = LedgerDB.getTipStatistics cdbLedgerDB + {------------------------------------------------------------------------------- Unifying interface over the immutable DB and volatile DB, but independent of the ledger DB. These functions therefore do not require the entire @@ -301,4 +321,4 @@ getAnyBlockComponent immutableDB volatileDB blockComponent p = do mustExist :: RealPoint blk -> Maybe b -> Either (ChainDbFailure blk) b mustExist p Nothing = Left $ ChainDbMissingBlock p -mustExist _ (Just b) = Right $ b +mustExist _ (Just b) = Right b diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs index 2eaaad37aa..36450665df 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Impl/Types.hs @@ -4,6 +4,7 @@ {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} {-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} @@ -77,7 +78,7 @@ import NoThunks.Class (OnlyCheckWhnfNamed (..)) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.Fragment.Diff (ChainDiff) -import Ouroboros.Consensus.Ledger.Extended (ExtValidationError) +import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol import Ouroboros.Consensus.Protocol.Abstract @@ -87,13 +88,12 @@ import Ouroboros.Consensus.Storage.ChainDB.API (AddBlockPromise (..), StreamTo, UnknownRange) import Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment (InvalidBlockPunishment) -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB (LgrDB, - LgrDbSerialiseConstraints) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB import Ouroboros.Consensus.Storage.ImmutableDB (ImmutableDB, ImmutableDbSerialiseConstraints) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB -import Ouroboros.Consensus.Storage.LedgerDB (UpdateLedgerDbTraceEvent) +import Ouroboros.Consensus.Storage.LedgerDB (LedgerDB', + LedgerDbSerialiseConstraints, TraceLedgerDBEvent, + TraceValidateEvent) import Ouroboros.Consensus.Storage.Serialisation import Ouroboros.Consensus.Storage.VolatileDB (VolatileDB, VolatileDbSerialiseConstraints) @@ -102,13 +102,14 @@ import Ouroboros.Consensus.Util (Fuse) import Ouroboros.Consensus.Util.CallStack import Ouroboros.Consensus.Util.Enclose (Enclosing, Enclosing' (..)) import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Util.Orphans () import Ouroboros.Consensus.Util.STM (WithFingerprint) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) import Ouroboros.Network.Block (MaxSlotNo) -- | All the serialisation related constraints needed by the ChainDB. 
class ( ImmutableDbSerialiseConstraints blk - , LgrDbSerialiseConstraints blk + , LedgerDbSerialiseConstraints blk , VolatileDbSerialiseConstraints blk -- Needed for Follower , EncodeDiskDep (NestedCtxt Header) blk @@ -169,7 +170,7 @@ data ChainDbState m blk data ChainDbEnv m blk = CDB { cdbImmutableDB :: !(ImmutableDB m blk) , cdbVolatileDB :: !(VolatileDB m blk) - , cdbLgrDB :: !(LgrDB m blk) + , cdbLedgerDB :: !(LedgerDB' m blk) , cdbChain :: !(StrictTVar m (AnchoredFragment (Header blk))) -- ^ Contains the current chain fragment. -- @@ -235,8 +236,6 @@ data ChainDbEnv m blk = CDB , cdbChainSelFuse :: !(Fuse m) , cdbTracer :: !(Tracer m (TraceEvent blk)) , cdbRegistry :: !(ResourceRegistry m) - -- ^ Resource registry that will be used to (re)start the background - -- threads, see 'cdbBgThreads'. , cdbGcDelay :: !DiffTime -- ^ How long to wait between copying a block from the VolatileDB to -- ImmutableDB and garbage collecting it from the VolatileDB @@ -268,21 +267,21 @@ instance (IOLike m, LedgerSupportsProtocol blk, BlockSupportsDiffusionPipelining -------------------------------------------------------------------------------} data Internal m blk = Internal - { intCopyToImmutableDB :: m (WithOrigin SlotNo) + { intCopyToImmutableDB :: m (WithOrigin SlotNo) -- ^ Copy the blocks older than @k@ from to the VolatileDB to the -- ImmutableDB and update the in-memory chain fragment correspondingly. -- -- The 'SlotNo' of the tip of the ImmutableDB after copying the blocks is -- returned. This can be used for a garbage collection on the VolatileDB. - , intGarbageCollect :: SlotNo -> m () + , intGarbageCollect :: SlotNo -> m () -- ^ Perform garbage collection for blocks <= the given 'SlotNo'. - , intUpdateLedgerSnapshots :: m () + , intTryTakeSnapshot :: m () -- ^ Write a new LedgerDB snapshot to disk and remove the oldest one(s). - , intAddBlockRunner :: m Void + , intAddBlockRunner :: m Void -- ^ Start the loop that adds blocks to the ChainDB retrieved from the -- queue populated by 'ChainDB.addBlock'. Execute this loop in a separate -- thread. - , intKillBgThreads :: StrictTVar m (m ()) + , intKillBgThreads :: StrictTVar m (m ()) -- ^ A handle to kill the background threads. } @@ -507,21 +506,19 @@ closeChainSelQueue (ChainSelQueue queue) = do -- | Trace type for the various events of the ChainDB. 
data TraceEvent blk - = TraceAddBlockEvent (TraceAddBlockEvent blk) - | TraceFollowerEvent (TraceFollowerEvent blk) - | TraceCopyToImmutableDBEvent (TraceCopyToImmutableDBEvent blk) - | TraceGCEvent (TraceGCEvent blk) - | TraceInitChainSelEvent (TraceInitChainSelEvent blk) - | TraceOpenEvent (TraceOpenEvent blk) - | TraceIteratorEvent (TraceIteratorEvent blk) - | TraceSnapshotEvent (LgrDB.TraceSnapshotEvent blk) - | TraceLedgerReplayEvent (LgrDB.TraceReplayEvent blk) - | TraceImmutableDBEvent (ImmutableDB.TraceEvent blk) - | TraceVolatileDBEvent (VolatileDB.TraceEvent blk) + = TraceAddBlockEvent (TraceAddBlockEvent blk) + | TraceFollowerEvent (TraceFollowerEvent blk) + | TraceCopyToImmutableDBEvent (TraceCopyToImmutableDBEvent blk) + | TraceGCEvent (TraceGCEvent blk) + | TraceInitChainSelEvent (TraceInitChainSelEvent blk) + | TraceOpenEvent (TraceOpenEvent blk) + | TraceIteratorEvent (TraceIteratorEvent blk) + | TraceLedgerDBEvent (TraceLedgerDBEvent blk) + | TraceImmutableDBEvent (ImmutableDB.TraceEvent blk) + | TraceVolatileDBEvent (VolatileDB.TraceEvent blk) | TraceLastShutdownUnclean deriving (Generic) - deriving instance ( Eq (Header blk) , LedgerSupportsProtocol blk @@ -703,7 +700,7 @@ data TraceValidationEvent blk = -- | A candidate chain was valid. | ValidCandidate (AnchoredFragment (Header blk)) - | UpdateLedgerDbTraceEvent (UpdateLedgerDbTraceEvent blk) + | UpdateLedgerDbTraceEvent (TraceValidateEvent blk) deriving (Generic) deriving instance diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Init.hs index 5c2db3fa4a..0c140aed3e 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Init.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ChainDB/Init.hs @@ -24,11 +24,11 @@ data InitChainDB m blk = InitChainDB { addBlock :: blk -> m () -- | Return the current ledger state - , getCurrentLedger :: m (LedgerState blk) + , getCurrentLedger :: m (LedgerState blk EmptyMK) } fromFull :: - (IsLedger (LedgerState blk), IOLike m) + IOLike m => ChainDB m blk -> InitChainDB m blk fromFull db = InitChainDB { addBlock = @@ -40,7 +40,7 @@ fromFull db = InitChainDB { map :: Functor m => (blk' -> blk) - -> (LedgerState blk -> LedgerState blk') + -> (LedgerState blk EmptyMK -> LedgerState blk' EmptyMK) -> InitChainDB m blk -> InitChainDB m blk' map f g db = InitChainDB { addBlock = addBlock db . 
f diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/Common.hs index 6da304ff11..985755b219 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/Common.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/Common.hs @@ -153,7 +153,6 @@ data BlockComponent blk a where GetHash :: BlockComponent blk (HeaderHash blk) GetSlot :: BlockComponent blk SlotNo GetIsEBB :: BlockComponent blk IsEBB - -- TODO: use `SizeInBytes` rather than Word32 GetBlockSize :: BlockComponent blk SizeInBytes GetHeaderSize :: BlockComponent blk Word16 GetNestedCtxt :: BlockComponent blk (SomeSecond (NestedCtxt Header) blk) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl.hs index d4b4716b84..290d5f6134 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl.hs @@ -143,6 +143,9 @@ data ImmutableDbArgs f m blk = ImmutableDbArgs { -- 'Ouroboros.Consensus.Storage.Common.GetVerifiedBlock' components when -- extracting them from the ImmutableDB. , immCheckIntegrity :: HKD f (blk -> Bool) + -- ^ Predicate to check for integrity of + -- 'Ouroboros.Consensus.Storage.Common.GetVerifiedBlock' components when + -- extracting them from the ImmutableDB. , immChunkInfo :: HKD f ChunkInfo , immCodecConfig :: HKD f (CodecConfig blk) , immHasFS :: HKD f (SomeHasFS m) @@ -151,6 +154,8 @@ data ImmutableDbArgs f m blk = ImmutableDbArgs { -- | Which chunks of the ImmutableDB to validate on opening: all chunks, or -- only the most recent chunk? , immValidationPolicy :: ValidationPolicy + -- ^ Which chunks of the ImmutableDB to validate on opening: all chunks, or + -- only the most recent chunk? } -- | Default arguments diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl/Stream.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl/Stream.hs new file mode 100644 index 0000000000..e722d66bcb --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/ImmutableDB/Impl/Stream.hs @@ -0,0 +1,116 @@ +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE RecordWildCards #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream ( + NextItem (..) + , StreamAPI (..) 
+ , streamAPI + , streamAPI' + , streamAll + ) where + +import Control.Monad.Except +import Control.ResourceRegistry +import GHC.Stack +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Storage.Common +import Ouroboros.Consensus.Storage.ImmutableDB hiding (streamAll) +import qualified Ouroboros.Consensus.Storage.ImmutableDB.API as ImmutableDB +import Ouroboros.Consensus.Util.IOLike + +{------------------------------------------------------------------------------- + Abstraction over the streaming API provided by the Chain DB +-------------------------------------------------------------------------------} + +-- | Next block returned during streaming +data NextItem blk = NoMoreItems | NextItem blk | NextBlock blk + +-- | Stream blocks from the immutable DB +-- +-- When we initialize the ledger DB, we try to find a snapshot close to the +-- tip of the immutable DB, and then stream blocks from the immutable DB to its +-- tip to bring the ledger up to date with the tip of the immutable DB. +-- +-- In CPS form to enable the use of 'withXYZ' style iterator init functions. +newtype StreamAPI m blk a = StreamAPI { + -- | Start streaming after the specified block + streamAfter :: forall b. HasCallStack + => Point blk + -- Reference to the block corresponding to the snapshot we found + -- (or 'GenesisPoint' if we didn't find any) + + -> (Either (RealPoint blk) (m (NextItem a)) -> m b) + -- Get the next block (by value) + -- + -- Should be @Left pt@ if the snapshot we found is more recent than the + -- tip of the immutable DB. Since we only store snapshots to disk for + -- blocks in the immutable DB, this can only happen if the immutable DB + -- got truncated due to disk corruption. The returned @pt@ is a + -- 'RealPoint', not a 'Point', since it must always be possible to + -- stream after genesis. + -> m b + } + +-- | Stream all blocks +streamAll :: + forall m blk e b a. (Monad m, HasCallStack) + => StreamAPI m blk b + -> Point blk -- ^ Starting point for streaming + -> (RealPoint blk -> e) -- ^ Error when tip not found + -> a -- ^ Starting point when tip /is/ found + -> (b -> a -> m a) -- ^ Update function for each block + -> ExceptT e m a +streamAll StreamAPI{..} tip notFound e f = ExceptT $ + streamAfter tip $ \case + Left tip' -> return $ Left (notFound tip') + + Right getNext -> do + let go :: a -> m a + go a = do mNext <- getNext + case mNext of + NoMoreItems -> return a + NextItem b -> go =<< f b a + -- This is here only to silence the non-exhaustiveness + -- check but it will never be matched + NextBlock b -> go =<< f b a + Right <$> go e + + +streamAPI :: + (IOLike m, HasHeader blk) + => ImmutableDB m blk -> StreamAPI m blk blk +streamAPI = streamAPI' (return . NextItem) GetBlock + +streamAPI' :: + forall m blk a. 
+ (IOLike m, HasHeader blk) + => (a -> m (NextItem a)) -- ^ Stop condition + -> BlockComponent blk a + -> ImmutableDB m blk + -> StreamAPI m blk a +streamAPI' shouldStop blockComponent immutableDB = StreamAPI streamAfter + where + streamAfter :: Point blk + -> (Either (RealPoint blk) (m (NextItem a)) -> m b) + -> m b + streamAfter tip k = withRegistry $ \registry -> do + eItr <- + ImmutableDB.streamAfterPoint + immutableDB + registry + blockComponent + tip + case eItr of + -- Snapshot is too recent + Left err -> k $ Left $ ImmutableDB.missingBlockPoint err + Right itr -> k $ Right $ streamUsing itr + + streamUsing :: ImmutableDB.Iterator m blk a + -> m (NextItem a) + streamUsing itr = do + itrResult <- ImmutableDB.iteratorNext itr + case itrResult of + ImmutableDB.IteratorExhausted -> return NoMoreItems + ImmutableDB.IteratorResult b -> shouldStop b diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB.hs index 7e970703d9..28a2f3c63a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB.hs @@ -1,187 +1,77 @@ --- | The Ledger DB is responsible for the following tasks: --- --- - __Maintaining the in-memory ledger state at the tip__: When we try to --- extend our chain with a new block fitting onto our tip, the block must --- first be validated using the right ledger state, i.e., the ledger state --- corresponding to the tip. --- --- - __Maintaining the past \(k\) in-memory ledger states__: we might roll back --- up to \(k\) blocks when switching to a more preferable fork. Consider the --- example below: --- --- <> --- --- Our current chain's tip is \(C_2\), but the fork containing blocks --- with tags \(F_1\), \(F_2\), and \(F_3\) is more preferable. We roll back --- our chain to the intersection point of the two chains, \(I\), which must --- be not more than \(k\) blocks back from our current tip. Next, we must --- validate block \(F_1\) using the ledger state at block \(I\), after which --- we can validate \(F_2\) using the resulting ledger state, and so on. --- --- This means that we need access to all ledger states of the past \(k\) --- blocks, i.e., the ledger states corresponding to the volatile part of the --- current chain. Note that applying a block to a ledger state is not an --- invertible operation, so it is not possible to simply /unapply/ \(C_1\) --- and \(C_2\) to obtain \(I\). --- --- Access to the last \(k\) ledger states is not only needed for validating --- candidate chains, but also by the: --- --- - __Local state query server__: To query any of the past \(k\) ledger --- states. --- --- - __Chain sync client__: To validate headers of a chain that intersects --- with any of the past \(k\) blocks. --- --- - __Storing snapshots on disk__: To obtain a ledger state for the current tip --- of the chain, one has to apply /all blocks in the chain/ one-by-one to --- the initial ledger state. When starting up the system with an on-disk --- chain containing millions of blocks, all of them would have to be read --- from disk and applied. This process can take hours, depending on the --- storage and CPU speed, and is thus too costly to perform on each startup. --- --- For this reason, a recent snapshot of the ledger state should be --- periodically written to disk. 
Upon the next startup, that snapshot can be --- read and used to restore the current ledger state, as well as the past --- volatile \(k\) ledger states. --- --- === __(image code)__ --- >>> import Image.LaTeX.Render --- >>> import Control.Monad --- >>> import System.Directory --- >>> --- >>> createDirectoryIfMissing True "docs/haddocks/" --- >>> :{ --- >>> either (error . show) pure =<< --- >>> renderToFile "docs/haddocks/ledgerdb-switch.svg" defaultEnv (tikz ["positioning", "arrows"]) "\ --- >>> \ \\draw (0, 0) -- (50pt, 0) coordinate (I);\ --- >>> \ \\draw (I) -- ++(20pt, 20pt) coordinate (C1) -- ++(20pt, 0) coordinate (C2);\ --- >>> \ \\draw (I) -- ++(20pt, -20pt) coordinate (F1) -- ++(20pt, 0) coordinate (F2) -- ++(20pt, 0) coordinate (F3);\ --- >>> \ \\node at (I) {$\\bullet$};\ --- >>> \ \\node at (C1) {$\\bullet$};\ --- >>> \ \\node at (C2) {$\\bullet$};\ --- >>> \ \\node at (F1) {$\\bullet$};\ --- >>> \ \\node at (F2) {$\\bullet$};\ --- >>> \ \\node at (F3) {$\\bullet$};\ --- >>> \ \\node at (I) [above left] {$I$};\ --- >>> \ \\node at (C1) [above] {$C_1$};\ --- >>> \ \\node at (C2) [above] {$C_2$};\ --- >>> \ \\node at (F1) [below] {$F_1$};\ --- >>> \ \\node at (F2) [below] {$F_2$};\ --- >>> \ \\node at (F3) [below] {$F_3$};\ --- >>> \ \\draw (60pt, 50pt) node {$\\overbrace{\\hspace{60pt}}$};\ --- >>> \ \\draw (60pt, 60pt) node[fill=white] {$k$};\ --- >>> \ \\draw [dashed] (30pt, -40pt) -- (30pt, 45pt);" --- >>> :} --- +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE KindSignatures #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} module Ouroboros.Consensus.Storage.LedgerDB ( - -- * LedgerDB - Checkpoint (..) - , LedgerDB (..) - , LedgerDB' - , LedgerDbCfg (..) - , configLedgerDb - -- * Initialization - , InitLog (..) - , ReplayStart (..) - , initLedgerDB - -- * Trace - , ReplayGoal (..) - , TraceReplayEvent (..) - , decorateReplayTracerWithGoal - , decorateReplayTracerWithStart - -- * Querying - , ledgerDbAnchor - , ledgerDbCurrent - , ledgerDbIsSaturated - , ledgerDbMaxRollback - , ledgerDbPast - , ledgerDbSnapshots - , ledgerDbTip - -- * Updates - -- ** Construct - , ledgerDbWithAnchor - -- ** Applying blocks - , AnnLedgerError (..) - , AnnLedgerError' - , Ap (..) - , ExceededRollback (..) - , ThrowsLedgerError (..) - , defaultThrowLedgerErrors - -- ** Block resolution - , ResolveBlock - , ResolvesBlocks (..) - , defaultResolveBlocks - -- ** Operations - , defaultResolveWithErrors - , ledgerDbBimap - , ledgerDbPrune - , ledgerDbPush - , ledgerDbSwitch - -- ** Pure API - , ledgerDbPush' - , ledgerDbPushMany' - , ledgerDbSwitch' - -- ** Trace - , PushGoal (..) - , PushStart (..) - , Pushing (..) - , UpdateLedgerDbTraceEvent (..) - -- * Snapshots - , DiskSnapshot (..) - -- ** Read from disk - , SnapshotFailure (..) - , diskSnapshotIsTemporary - , listSnapshots - , readSnapshot - -- ** Write to disk - , takeSnapshot - , trimSnapshots - , writeSnapshot - -- ** Low-level API (primarily exposed for testing) - , decodeSnapshotBackwardsCompatible - , deleteSnapshot - , encodeSnapshot - , snapshotToFileName - , snapshotToPath - -- ** Trace - , TraceSnapshotEvent (..) - -- * Disk policy - , DiskPolicy (..) - , DiskPolicyArgs (..) - , NumOfDiskSnapshots (..) - , SnapshotInterval (..) - , TimeSinceLast (..) 
-    , defaultDiskPolicyArgs
-    , mkDiskPolicy
+    -- * API
+    module Ouroboros.Consensus.Storage.LedgerDB.API
+  , module Ouroboros.Consensus.Storage.LedgerDB.API.Config
+  , module Ouroboros.Consensus.Storage.LedgerDB.Impl.Common
+    -- * Impl
+  , openDB
   ) where
-import Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy
-    (DiskPolicy (..), DiskPolicyArgs (..),
-    NumOfDiskSnapshots (..), SnapshotInterval (..),
-    TimeSinceLast (..), defaultDiskPolicyArgs, mkDiskPolicy)
-import Ouroboros.Consensus.Storage.LedgerDB.Init (InitLog (..),
-    ReplayGoal (..), ReplayStart (..), TraceReplayEvent (..),
-    decorateReplayTracerWithGoal,
-    decorateReplayTracerWithStart, initLedgerDB)
-import Ouroboros.Consensus.Storage.LedgerDB.LedgerDB (Checkpoint (..),
-    LedgerDB (..), LedgerDB', LedgerDbCfg (..), configLedgerDb)
-import Ouroboros.Consensus.Storage.LedgerDB.Query (ledgerDbAnchor,
-    ledgerDbCurrent, ledgerDbIsSaturated, ledgerDbMaxRollback,
-    ledgerDbPast, ledgerDbSnapshots, ledgerDbTip)
-import Ouroboros.Consensus.Storage.LedgerDB.Snapshots
-    (DiskSnapshot (..), SnapshotFailure (..),
-    TraceSnapshotEvent (..), decodeSnapshotBackwardsCompatible,
-    deleteSnapshot, diskSnapshotIsTemporary, encodeSnapshot,
-    listSnapshots, readSnapshot, snapshotToFileName,
-    snapshotToPath, takeSnapshot, trimSnapshots, writeSnapshot)
-import Ouroboros.Consensus.Storage.LedgerDB.Update
-    (AnnLedgerError (..), AnnLedgerError', Ap (..),
-    ExceededRollback (..), PushGoal (..), PushStart (..),
-    Pushing (..), ResolveBlock, ResolvesBlocks (..),
-    ThrowsLedgerError (..), UpdateLedgerDbTraceEvent (..),
-    defaultResolveBlocks, defaultResolveWithErrors,
-    defaultThrowLedgerErrors, ledgerDbBimap, ledgerDbPrune,
-    ledgerDbPush, ledgerDbPush', ledgerDbPushMany',
-    ledgerDbSwitch, ledgerDbSwitch', ledgerDbWithAnchor)
+import Control.Monad.Base
+import Data.Word
+import Ouroboros.Consensus.Block
+import Ouroboros.Consensus.HardFork.Abstract
+import Ouroboros.Consensus.Ledger.Inspect
+import Ouroboros.Consensus.Ledger.SupportsProtocol
+import Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream
+import Ouroboros.Consensus.Storage.LedgerDB.API
+import Ouroboros.Consensus.Storage.LedgerDB.API.Config
+import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args
+import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common
+import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Init as Init
+import Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate
+import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.Init as V1
+import qualified Ouroboros.Consensus.Storage.LedgerDB.V2.Init as V2
+import Ouroboros.Consensus.Util.Args
+import Ouroboros.Consensus.Util.CallStack
+import Ouroboros.Consensus.Util.IOLike
+
+openDB ::
+  forall m blk.
+  ( IOLike m
+  , MonadBase m m
+  , LedgerSupportsProtocol blk
+  , LedgerDbSerialiseConstraints blk
+  , InspectLedger blk
+  , HasCallStack
+  , HasHardForkHistory blk
+  )
+  => Complete LedgerDbArgs m blk
+  -- ^ Stateless initialization arguments
+  -> StreamAPI m blk blk
+  -- ^ Stream source for blocks.
+  --
+  -- After reading a snapshot from disk, the ledger DB will be brought up to
+  -- date with the tip of this stream of blocks. The corresponding ledger state
+  -- can then be used as the starting point for chain selection in the ChainDB
+  -- driver.
+  -> Point blk
+  -- ^ The replay goal, i.e. the tip of the stream of blocks.
+ -> ResolveBlock m blk + -- ^ How to get blocks from the ChainDB + -> m (LedgerDB' m blk, Word64) +openDB + args + stream + replayGoal + getBlock = case lgrFlavorArgs args of + LedgerDbFlavorArgsV1 bss -> + let initDb = V1.mkInitDb + args + bss + getBlock + in + Init.openDB args initDb stream replayGoal + LedgerDbFlavorArgsV2 bss -> + let initDb = V2.mkInitDb + args + bss + getBlock + in + Init.openDB args initDb stream replayGoal diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API.hs new file mode 100644 index 0000000000..bf502ef21d --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API.hs @@ -0,0 +1,572 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | The Ledger DB is responsible for the following tasks: +-- +-- - __Maintaining the in-memory ledger state at the tip__: When we try to +-- extend our chain with a new block fitting onto our tip, the block must +-- first be validated using the right ledger state, i.e., the ledger state +-- corresponding to the tip. +-- +-- - __Maintaining the past \(k\) in-memory ledger states__: we might roll back +-- up to \(k\) blocks when switching to a more preferable fork. Consider the +-- example below: +-- +-- <> +-- +-- Our current chain's tip is \(C_2\), but the fork containing blocks +-- \(F_1\), \(F_2\), and \(F_3\) is more preferable. We roll back our chain +-- to the intersection point of the two chains, \(I\), which must be not +-- more than \(k\) blocks back from our current tip. Next, we must validate +-- block \(F_1\) using the ledger state at block \(I\), after which we can +-- validate \(F_2\) using the resulting ledger state, and so on. +-- +-- This means that we need access to all ledger states of the past \(k\) +-- blocks, i.e., the ledger states corresponding to the volatile part of the +-- current chain. Note that applying a block to a ledger state is not an +-- invertible operation, so it is not possible to simply /unapply/ \(C_1\) +-- and \(C_2\) to obtain \(I\). +-- +-- Access to the last \(k\) ledger states is not only needed for validating +-- candidate chains, but also by the: +-- +-- - __Local state query server__: To query any of the past \(k\) ledger +-- states. +-- +-- - __Chain sync client__: To validate headers of a chain that intersects +-- with any of the past \(k\) blocks. +-- +-- - __Providing 'Ouroboros.Consensus.Ledger.Tables.Basics.LedgerTable's at any of the last \(k\) ledger states__: To apply blocks or transactions on top +-- of ledger states, the LedgerDB must be able to provide the appropriate +-- ledger tables at any of those ledger states. 
+-- +-- - __Storing snapshots on disk__: To obtain a ledger state for the current tip +-- of the chain, one has to apply /all blocks in the chain/ one-by-one to +-- the initial ledger state. When starting up the system with an on-disk +-- chain containing millions of blocks, all of them would have to be read +-- from disk and applied. This process can take hours, depending on the +-- storage and CPU speed, and is thus too costly to perform on each startup. +-- +-- For this reason, a recent snapshot of the ledger state should be +-- periodically written to disk. Upon the next startup, that snapshot can be +-- read and used to restore the current ledger state, as well as the past +-- \(k\) ledger states. +-- +-- - __Flushing 'LedgerTable' differences__: The running Consensus has to +-- periodically flush chunks of [differences]("Data.Map.Diff.Strict") +-- from the 'DbChangelog' to the 'BackingStore', so that memory is +-- off-loaded to the backing store, and if the backing store is an on-disk +-- implementation, reduce the memory usage. +-- +-- Note that whenever we say /ledger state/ we mean the @'ExtLedgerState' blk +-- mk@ type described in "Ouroboros.Consensus.Ledger.Basics". +-- +-- === __(image code)__ +-- >>> import Image.LaTeX.Render +-- >>> import Control.Monad +-- >>> import System.Directory +-- >>> +-- >>> createDirectoryIfMissing True "docs/haddocks/" +-- >>> :{ +-- >>> either (error . show) pure =<< +-- >>> renderToFile "docs/haddocks/ledgerdb-switch.svg" defaultEnv (tikz ["positioning", "arrows"]) "\ +-- >>> \ \\draw (0, 0) -- (50pt, 0) coordinate (I);\ +-- >>> \ \\draw (I) -- ++(20pt, 20pt) coordinate (C1) -- ++(20pt, 0) coordinate (C2);\ +-- >>> \ \\draw (I) -- ++(20pt, -20pt) coordinate (F1) -- ++(20pt, 0) coordinate (F2) -- ++(20pt, 0) coordinate (F3);\ +-- >>> \ \\node at (I) {$\\bullet$};\ +-- >>> \ \\node at (C1) {$\\bullet$};\ +-- >>> \ \\node at (C2) {$\\bullet$};\ +-- >>> \ \\node at (F1) {$\\bullet$};\ +-- >>> \ \\node at (F2) {$\\bullet$};\ +-- >>> \ \\node at (F3) {$\\bullet$};\ +-- >>> \ \\node at (I) [above left] {$I$};\ +-- >>> \ \\node at (C1) [above] {$C_1$};\ +-- >>> \ \\node at (C2) [above] {$C_2$};\ +-- >>> \ \\node at (F1) [below] {$F_1$};\ +-- >>> \ \\node at (F2) [below] {$F_2$};\ +-- >>> \ \\node at (F3) [below] {$F_3$};\ +-- >>> \ \\draw (60pt, 50pt) node {$\\overbrace{\\hspace{60pt}}$};\ +-- >>> \ \\draw (60pt, 60pt) node[fill=white] {$k$};\ +-- >>> \ \\draw [dashed] (30pt, -40pt) -- (30pt, 45pt);" +-- >>> :} +-- +module Ouroboros.Consensus.Storage.LedgerDB.API ( + -- * Main API + LedgerDB (..) + , LedgerDB' + , TestInternals (..) + , TestInternals' + , currentPoint + -- * Exceptions + , LedgerDbError (..) + -- * Forker + , ExceededRollback (..) + , Forker (..) + , Forker' + , ForkerKey (..) + , GetForkerError (..) + , RangeQuery (..) + , RangeQueryPrevious (..) + , Statistics (..) + , forkerCurrentPoint + , getReadOnlyForker + , getTipStatistics + , readLedgerTablesAtFor + , withPrivateTipForker + , withTipForker + -- ** Read-only forkers + , ReadOnlyForker (..) + , ReadOnlyForker' + , readOnlyForker + -- * Snapshots + , SnapCounters (..) + -- * Validation + , ValidateResult (..) + , ValidateResult' + -- ** Annotated ledger errors + , AnnLedgerError (..) + , AnnLedgerError' + -- * Tracing + -- ** Validation events + , PushGoal (..) + , PushStart (..) + , Pushing (..) + , TraceValidateEvent (..) + -- ** Forker events + , TraceForkerEvent (..) + , TraceForkerEventWithKey (..) 
+ ) where + +import Control.Monad (forM) +import Control.Monad.Class.MonadTime.SI +import Control.ResourceRegistry +import Data.Kind +import Data.Set (Set) +import Data.Word +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.HeaderStateHistory +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.Protocol.LocalStateQuery.Type + +{------------------------------------------------------------------------------- + Main API +-------------------------------------------------------------------------------} + +-- | The core API of the LedgerDB component +type LedgerDB :: (Type -> Type) -> LedgerStateKind -> Type -> Type +data LedgerDB m l blk = LedgerDB { + -- | Get the empty ledger state at the (volatile) tip of the LedgerDB. + getVolatileTip :: STM m (l EmptyMK) + -- | Get the empty ledger state at the immutable tip of the LedgerDB. + , getImmutableTip :: STM m (l EmptyMK) + -- | Get an empty ledger state at a requested point in the LedgerDB, if it + -- exists. + , getPastLedgerState :: Point blk -> STM m (Maybe (l EmptyMK)) + -- | Get the header state history for all ledger states in the LedgerDB. + , getHeaderStateHistory :: + (l ~ ExtLedgerState blk) + => STM m (HeaderStateHistory blk) + -- | Acquire a 'Forker' at the tip. + , getForkerAtWellKnownPoint :: + ResourceRegistry m +#if __GLASGOW_HASKELL__ >= 902 + -- ^ The producer/consumer registry. +#endif + -> Target (Point blk) + -> m (Forker m l blk) + -- | Acquire a 'Forker' at the requested point. If a ledger state associated + -- with the requested point does not exist in the LedgerDB, it will return a + -- 'GetForkerError'. + , getForkerAtPoint :: + ResourceRegistry m +#if __GLASGOW_HASKELL__ >= 902 + -- ^ The producer/consumer registry. +#endif + -> Point blk + -> m (Either GetForkerError (Forker m l blk)) + , validate :: + (l ~ ExtLedgerState blk) + => ResourceRegistry m +#if __GLASGOW_HASKELL__ >= 902 + -- ^ The producer/consumer registry. +#endif + -> (TraceValidateEvent blk -> m ()) + -> BlockCache blk + -> Word64 + -> [Header blk] + -> m (ValidateResult m l blk) + -- | Get the references to blocks that have previously been applied. + , getPrevApplied :: STM m (Set (RealPoint blk)) + -- | Garbage collect references to old blocks that have been previously + -- applied. + , garbageCollect :: SlotNo -> STM m () + -- | If the provided arguments indicate so (based on the DiskPolicy with + -- which this LedgerDB was opened), take a snapshot and delete stale ones. + , tryTakeSnapshot :: + (l ~ ExtLedgerState blk) + => Maybe (Time, Time) +#if __GLASGOW_HASKELL__ >= 902 + -- ^ If a snapshot has been taken already, the time at which it was + -- taken and the current time. +#endif + -> Word64 +#if __GLASGOW_HASKELL__ >= 902 + -- ^ How many blocks have been processed since the last snapshot. +#endif + -> m SnapCounters + -- | Flush in-memory LedgerDB state to disk, if possible. This is a no-op + -- for implementations that do not need an explicit flush function. + , tryFlush :: m () + -- | Close the ChainDB + -- + -- Idempotent. + -- + -- Should only be called on shutdown. 
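+    --
+    -- A minimal shutdown sketch (illustrative only, not part of this API):
+    -- 'openDB' lives in "Ouroboros.Consensus.Storage.LedgerDB", @bracket@
+    -- comes from the IOLike utilities, and @args@, @stream@, @goal@,
+    -- @getBlock@ and @k@ are assumed to be in scope.
+    --
+    -- > -- acquire the DB, run the continuation, and close it on exit
+    -- > withLedgerDB k = bracket (fst <$> openDB args stream goal getBlock) closeDB k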
+  , closeDB :: m ()
+  }
+  deriving NoThunks via OnlyCheckWhnfNamed "LedgerDB" (LedgerDB m l blk)
+
+type instance HeaderHash (LedgerDB m l blk) = HeaderHash blk
+
+type LedgerDB' m blk = LedgerDB m (ExtLedgerState blk) blk
+
+currentPoint ::
+     (GetTip l, HeaderHash l ~ HeaderHash blk, Functor (STM m))
+  => LedgerDB m l blk
+  -> STM m (Point blk)
+currentPoint ldb = castPoint . getTip <$> getVolatileTip ldb
+
+data TestInternals m l blk = TestInternals {
+    wipeLedgerDB :: m ()
+  , takeSnapshotNOW :: Maybe DiskSnapshot -> m ()
+  , reapplyThenPushNOW :: blk -> m ()
+  , truncateSnapshots :: m ()
+  , closeLedgerDB :: m ()
+  }
+  deriving NoThunks via OnlyCheckWhnfNamed "TestInternals" (TestInternals m l blk)
+
+type TestInternals' m blk = TestInternals m (ExtLedgerState blk) blk
+
+{-------------------------------------------------------------------------------
+  Exceptions
+-------------------------------------------------------------------------------}
+
+-- | Database error
+--
+-- Thrown upon incorrect use: invalid input.
+data LedgerDbError blk =
+    -- | The LedgerDB is closed.
+    --
+    -- This will be thrown when performing some operations on the LedgerDB. The
+    -- 'CallStack' of the operation on the LedgerDB is included in the error.
+    ClosedDBError PrettyCallStack
+    -- | A Forker is closed.
+  | ClosedForkerError ForkerKey PrettyCallStack
+  deriving (Show)
+  deriving anyclass (Exception)
+
+{-------------------------------------------------------------------------------
+  Forker
+-------------------------------------------------------------------------------}
+
+-- | An independent handle to a point in the LedgerDB, which can be advanced to
+-- evaluate forks in the chain.
+type Forker :: (Type -> Type) -> LedgerStateKind -> Type -> Type
+data Forker m l blk = Forker {
+    -- | Close the current forker (idempotent).
+    --
+    -- Other functions on forkers should throw a 'ClosedForkerError' once the
+    -- forker is closed.
+    --
+    -- Note: always use this function before the forker is forgotten!
+    -- Otherwise, cleanup of (on-disk) state might not be prompt or guaranteed.
+    --
+    -- This function should release any resources that are held by the forker,
+    -- and not by the LedgerDB.
+    forkerClose :: !(m ())
+
+    -- Queries
+
+    -- | Read ledger tables from disk.
+  , forkerReadTables :: !(LedgerTables l KeysMK -> m (LedgerTables l ValuesMK))
+    -- | Range-read ledger tables from disk.
+  , forkerRangeReadTables :: !(RangeQueryPrevious l -> m (LedgerTables l ValuesMK))
+    -- | Get the full ledger state without tables.
+    --
+    -- If empty ledger state is all you need, use 'getVolatileTip',
+    -- 'getImmutableTip', or 'getPastLedgerState' instead.
+  , forkerGetLedgerState :: !(STM m (l EmptyMK))
+    -- | Get statistics about the current state of the handle if possible.
+    --
+    -- Returns 'Nothing' if the implementation is backed by @lsm-tree@.
+  , forkerReadStatistics :: !(m (Maybe Statistics))
+
+    -- Updates
+
+    -- | Advance the fork handle by pushing a new ledger state to the tip of the
+    -- current fork.
+  , forkerPush :: !(l DiffMK -> m ())
+    -- | Commit the fork, which was constructed using 'forkerPush', as the
+    -- current version of the LedgerDB.
+  , forkerCommit :: !(STM m ())
+  }
+
+-- | An identifier for a 'Forker'. See 'ldbForkers'.
+newtype ForkerKey = ForkerKey Word16 + deriving stock (Show, Eq, Ord) + deriving newtype (Enum, NoThunks, Num) + +type instance HeaderHash (Forker m l blk) = HeaderHash l + +type Forker' m blk = Forker m (ExtLedgerState blk) blk + +instance (GetTip l, HeaderHash l ~ HeaderHash blk, MonadSTM m) + => GetTipSTM m (Forker m l blk) where + getTipSTM forker = castPoint . getTip <$> forkerGetLedgerState forker + +data RangeQueryPrevious l = NoPreviousQuery | PreviousQueryWasFinal | PreviousQueryWasUpTo (Key l) + +data RangeQuery l = RangeQuery { + rqPrev :: !(RangeQueryPrevious l) + , rqCount :: !Int + } + +-- TODO: document +newtype Statistics = Statistics { + ledgerTableSize :: Int + } + +-- | Errors that can be thrown while acquiring forkers. +data GetForkerError = + -- | The requested point was not found in the LedgerDB, but the point is + -- recent enough that the point is not in the immutable part of the chain + PointNotOnChain + -- | The requested point was not found in the LedgerDB because the point is + -- in the immutable part of the chain. + | PointTooOld + deriving (Show, Eq) + +-- | Exceeded maximum rollback supported by the current ledger DB state +-- +-- Under normal circumstances this will not arise. It can really only happen +-- in the presence of data corruption (or when switching to a shorter fork, +-- but that is disallowed by all currently known Ouroboros protocols). +-- +-- Records both the supported and the requested rollback. +data ExceededRollback = ExceededRollback { + rollbackMaximum :: Word64 + , rollbackRequested :: Word64 + } + +forkerCurrentPoint :: + (GetTip l, HeaderHash l ~ HeaderHash blk, Functor (STM m)) + => Forker m l blk + -> STM m (Point blk) +forkerCurrentPoint forker = + castPoint + . getTip + <$> forkerGetLedgerState forker + +-- | 'bracket'-style usage of a forker at the LedgerDB tip. +withTipForker :: + IOLike m + => LedgerDB m l blk + -> ResourceRegistry m + -> (Forker m l blk -> m a) -> m a +withTipForker ldb rr = bracket (getForkerAtWellKnownPoint ldb rr VolatileTip) forkerClose + +-- | Like 'withTipForker', but it uses a private registry to allocate and +-- de-allocate the forker. +withPrivateTipForker :: + IOLike m + => LedgerDB m l blk + -> (Forker m l blk -> m a) -> m a +withPrivateTipForker ldb = bracketWithPrivateRegistry (\rr -> getForkerAtWellKnownPoint ldb rr VolatileTip) forkerClose + +-- | Get statistics from the tip of the LedgerDB. +getTipStatistics :: + IOLike m + => LedgerDB m l blk + -> m (Maybe Statistics) +getTipStatistics ldb = withPrivateTipForker ldb forkerReadStatistics + +{------------------------------------------------------------------------------- + Read-only forkers +-------------------------------------------------------------------------------} + +-- | Read-only 'Forker'. +-- +-- These forkers are not allowed to commit. They are used everywhere except in +-- Chain Selection. In particular they are now used in: +-- +-- - LocalStateQuery server, via 'getReadOnlyForkerAtPoint' +-- +-- - Forging loop. +-- +-- - Mempool. +type ReadOnlyForker :: (Type -> Type) -> LedgerStateKind -> Type -> Type +data ReadOnlyForker m l blk = ReadOnlyForker { + -- | See 'forkerClose' + roforkerClose :: !(m ()) + -- | See 'forkerReadTables' + , roforkerReadTables :: !(LedgerTables l KeysMK -> m (LedgerTables l ValuesMK)) + -- | See 'forkerRangeReadTables'. 
+ , roforkerRangeReadTables :: !(RangeQueryPrevious l -> m (LedgerTables l ValuesMK)) + -- | See 'forkerGetLedgerState' + , roforkerGetLedgerState :: !(STM m (l EmptyMK)) + -- | See 'forkerReadStatistics' + , roforkerReadStatistics :: !(m (Maybe Statistics)) + } + +type instance HeaderHash (ReadOnlyForker m l blk) = HeaderHash l + +type ReadOnlyForker' m blk = ReadOnlyForker m (ExtLedgerState blk) blk + +readOnlyForker :: Forker m l blk -> ReadOnlyForker m l blk +readOnlyForker forker = ReadOnlyForker { + roforkerClose = forkerClose forker + , roforkerReadTables = forkerReadTables forker + , roforkerRangeReadTables = forkerRangeReadTables forker + , roforkerGetLedgerState = forkerGetLedgerState forker + , roforkerReadStatistics = forkerReadStatistics forker + } + +getReadOnlyForker :: + MonadSTM m + => LedgerDB m l blk + -> ResourceRegistry m + -> Target (Point blk) + -> m (Either GetForkerError (ReadOnlyForker m l blk)) +getReadOnlyForker ldb rr = \case + VolatileTip -> Right . readOnlyForker <$> getForkerAtWellKnownPoint ldb rr VolatileTip + SpecificPoint pt -> fmap readOnlyForker <$> getForkerAtPoint ldb rr pt + ImmutableTip -> Right . readOnlyForker <$> getForkerAtWellKnownPoint ldb rr ImmutableTip + +-- | Read a table of values at the requested point via a 'ReadOnlyForker' +readLedgerTablesAtFor :: + IOLike m + => LedgerDB m l blk + -> Point blk + -> LedgerTables l KeysMK + -> m (Either GetForkerError (LedgerTables l ValuesMK)) +readLedgerTablesAtFor ldb p ks = + bracketWithPrivateRegistry + (\rr -> fmap readOnlyForker <$> getForkerAtPoint ldb rr p) + (mapM_ roforkerClose) + $ \foEith -> do + forM foEith $ \fo -> do + fo `roforkerReadTables` ks + +{------------------------------------------------------------------------------- + Snapshots +-------------------------------------------------------------------------------} + +-- | Counters to keep track of when we made the last snapshot. +data SnapCounters = SnapCounters { + -- | When was the last time we made a snapshot + prevSnapshotTime :: !(Maybe Time) + -- | How many blocks have we processed since the last snapshot + , ntBlocksSinceLastSnap :: !Word64 + } + +{------------------------------------------------------------------------------- + Validation +-------------------------------------------------------------------------------} + +-- | When validating a sequence of blocks, these are the possible outcomes. 
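+--
+-- A minimal sketch of how a caller might consume these outcomes (illustrative
+-- only; @logErr@ is an assumed logging action, not part of this API). Note
+-- that the ledger-error case closes the forker carried by the
+-- 'AnnLedgerError', per the usage note on 'forkerClose'.
+--
+-- > handleValidateResult ::
+-- >      Monad m
+-- >   => (String -> m ())
+-- >   -> ValidateResult m l blk
+-- >   -> m (Maybe (Forker m l blk))
+-- > handleValidateResult logErr = \case
+-- >   ValidateSuccessful forker  -> pure (Just forker)
+-- >   ValidateLedgerError err    -> do
+-- >     logErr "block application failed"
+-- >     forkerClose (annLedgerState err)
+-- >     pure Nothing
+-- >   ValidateExceededRollBack _ -> do
+-- >     logErr "exceeded maximum rollback"
+-- >     pure Nothing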
+data ValidateResult m l blk =
+    ValidateSuccessful       (Forker m l blk)
+  | ValidateLedgerError      (AnnLedgerError m l blk)
+  | ValidateExceededRollBack ExceededRollback
+
+type ValidateResult' m blk = ValidateResult m (ExtLedgerState blk) blk
+
+{-------------------------------------------------------------------------------
+  An annotated ledger error
+-------------------------------------------------------------------------------}
+
+-- | Annotated ledger errors
+data AnnLedgerError m l blk = AnnLedgerError {
+      -- | The forker at the ledger state just /before/ this block was applied
+      annLedgerState  :: Forker m l blk
+
+      -- | Reference to the block that had the error
+    , annLedgerErrRef :: RealPoint blk
+
+      -- | The ledger error itself
+    , annLedgerErr    :: LedgerErr l
+    }
+
+type AnnLedgerError' m blk = AnnLedgerError m (ExtLedgerState blk) blk
+
+{-------------------------------------------------------------------------------
+  Trace validation events
+-------------------------------------------------------------------------------}
+
+newtype PushStart blk = PushStart { unPushStart :: RealPoint blk }
+  deriving (Show, Eq)
+
+newtype PushGoal blk = PushGoal { unPushGoal :: RealPoint blk }
+  deriving (Show, Eq)
+
+newtype Pushing blk = Pushing { unPushing :: RealPoint blk }
+  deriving (Show, Eq)
+
+data TraceValidateEvent blk =
+    -- | Event fired when we are about to push a block to a forker
+    StartedPushingBlockToTheLedgerDb
+      !(PushStart blk)
+      -- ^ Point from which we started pushing new blocks
+      (PushGoal blk)
+      -- ^ Point to which we are updating the ledger; in the last
+      -- 'StartedPushingBlockToTheLedgerDb' event, 'Pushing' and 'PushGoal'
+      -- wrap the same 'RealPoint'
+      !(Pushing blk)
+      -- ^ Point of the block we are about to push
+  deriving (Show, Eq, Generic)
+
+{-------------------------------------------------------------------------------
+  Forker events
+-------------------------------------------------------------------------------}
+
+data TraceForkerEventWithKey =
+    TraceForkerEventWithKey ForkerKey TraceForkerEvent
+  deriving (Show, Eq)
+
+data TraceForkerEvent =
+    ForkerOpen
+  | ForkerCloseUncommitted
+  | ForkerCloseCommitted
+  | ForkerReadTablesStart
+  | ForkerReadTablesEnd
+  | ForkerRangeReadTablesStart
+  | ForkerRangeReadTablesEnd
+  | ForkerReadStatistics
+  | ForkerPushStart
+  | ForkerPushEnd
+  deriving (Show, Eq)
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API/Config.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API/Config.hs
new file mode 100644
index 0000000000..c5285c3138
--- /dev/null
+++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/API/Config.hs
@@ -0,0 +1,35 @@
+{-# LANGUAGE DeriveAnyClass #-}
+{-# LANGUAGE DeriveGeneric #-}
+{-# LANGUAGE FlexibleContexts #-}
+{-# LANGUAGE StandaloneDeriving #-}
+{-# LANGUAGE UndecidableInstances #-}
+
+module Ouroboros.Consensus.Storage.LedgerDB.API.Config (
+    LedgerDbCfg (..)
+ , configLedgerDb + ) where + +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block.Abstract +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Protocol.Abstract + +data LedgerDbCfg l = LedgerDbCfg { + ledgerDbCfgSecParam :: !SecurityParam + , ledgerDbCfg :: !(LedgerCfg l) + } + deriving (Generic) + +deriving instance NoThunks (LedgerCfg l) => NoThunks (LedgerDbCfg l) + +configLedgerDb :: + ConsensusProtocol (BlockProtocol blk) + => TopLevelConfig blk + -> LedgerDbCfg (ExtLedgerState blk) +configLedgerDb config = LedgerDbCfg { + ledgerDbCfgSecParam = configSecurityParam config + , ledgerDbCfg = ExtLedgerCfg config + } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/DiskPolicy.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/DiskPolicy.hs deleted file mode 100644 index 8b25ed1b68..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/DiskPolicy.hs +++ /dev/null @@ -1,148 +0,0 @@ -{-# LANGUAGE DataKinds #-} -{-# LANGUAGE DeriveFunctor #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE DerivingVia #-} -{-# LANGUAGE NumericUnderscores #-} -{-# LANGUAGE RecordWildCards #-} - -module Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy ( - DiskPolicy (..) - , DiskPolicyArgs (..) - , NumOfDiskSnapshots (..) - , SnapshotInterval (..) - , TimeSinceLast (..) - , defaultDiskPolicyArgs - , mkDiskPolicy - ) where - -import Control.Monad.Class.MonadTime.SI -import Data.Time.Clock (secondsToDiffTime) -import Data.Word -import GHC.Generics -import NoThunks.Class (NoThunks, OnlyCheckWhnf (..)) -import Ouroboros.Consensus.Config.SecurityParam - --- | Length of time, requested by the user, that has to pass after which --- a snapshot is taken. It can be: --- --- 1. either explicitly provided by user in seconds --- 2. or default value can be requested - the specific DiskPolicy determines --- what that is exactly, see `mkDiskPolicy` as an example -data SnapshotInterval = - DefaultSnapshotInterval - | RequestedSnapshotInterval DiffTime - deriving stock (Eq, Generic, Show) - --- | Number of snapshots to be stored on disk. This is either the default value --- as determined by the DiskPolicy, or it is provided by the user. See the --- `DiskPolicy` documentation for more information. -data NumOfDiskSnapshots = - DefaultNumOfDiskSnapshots - | RequestedNumOfDiskSnapshots Word - deriving stock (Eq, Generic, Show) - -data DiskPolicyArgs = DiskPolicyArgs SnapshotInterval NumOfDiskSnapshots - --- | On-disk policy --- --- We only write ledger states that are older than @k@ blocks to disk (that is, --- snapshots that are guaranteed valid). The on-disk policy determines how often --- we write to disk and how many checkpoints we keep. -data DiskPolicy = DiskPolicy { - -- | How many snapshots do we want to keep on disk? - -- - -- A higher number of on-disk snapshots is primarily a safe-guard against - -- disk corruption: it trades disk space for reliability. - -- - -- Examples: - -- - -- * @0@: Delete the snapshot immediately after writing. - -- Probably not a useful value :-D - -- * @1@: Delete the previous snapshot immediately after writing the next - -- Dangerous policy: if for some reason the deletion happens before - -- the new snapshot is written entirely to disk (we don't @fsync@), - -- we have no choice but to start at the genesis snapshot on the - -- next startup. 
- -- * @2@: Always keep 2 snapshots around. This means that when we write - -- the next snapshot, we delete the oldest one, leaving the middle - -- one available in case of truncation of the write. This is - -- probably a sane value in most circumstances. - onDiskNumSnapshots :: Word - - -- | Should we write a snapshot of the ledger state to disk? - -- - -- This function is passed two bits of information: - -- - -- * The time since the last snapshot, or 'NoSnapshotTakenYet' if none was taken yet. - -- Note that 'NoSnapshotTakenYet' merely means no snapshot had been taking yet - -- since the node was started; it does not necessarily mean that none - -- exist on disk. - -- - -- * The distance in terms of blocks applied to the /oldest/ ledger - -- snapshot in memory. During normal operation, this is the number of - -- blocks written to the ImmutableDB since the last snapshot. On - -- startup, it is computed by counting how many immutable blocks we had - -- to reapply to get to the chain tip. This is useful, as it allows the - -- policy to decide to take a snapshot /on node startup/ if a lot of - -- blocks had to be replayed. - -- - -- See also 'mkDiskPolicy' - , onDiskShouldTakeSnapshot :: TimeSinceLast DiffTime -> Word64 -> Bool - } - deriving NoThunks via OnlyCheckWhnf DiskPolicy - -data TimeSinceLast time = NoSnapshotTakenYet | TimeSinceLast time - deriving (Functor, Show) - --- | Default on-disk policy arguments suitable to use with cardano-node --- -defaultDiskPolicyArgs :: DiskPolicyArgs -defaultDiskPolicyArgs = DiskPolicyArgs DefaultSnapshotInterval DefaultNumOfDiskSnapshots - -mkDiskPolicy :: SecurityParam -> DiskPolicyArgs -> DiskPolicy -mkDiskPolicy (SecurityParam k) (DiskPolicyArgs reqInterval reqNumOfSnapshots) = - DiskPolicy {..} - where - onDiskNumSnapshots :: Word - onDiskNumSnapshots = case reqNumOfSnapshots of - DefaultNumOfDiskSnapshots -> 2 - RequestedNumOfDiskSnapshots value -> value - - onDiskShouldTakeSnapshot :: - TimeSinceLast DiffTime - -> Word64 - -> Bool - onDiskShouldTakeSnapshot NoSnapshotTakenYet blocksSinceLast = - -- If users never leave their wallet running for long, this would mean - -- that under some circumstances we would never take a snapshot - -- So, on startup (when the 'time since the last snapshot' is `Nothing`), - -- we take a snapshot as soon as there are @k@ blocks replayed. - -- This means that even if users frequently shut down their wallet, we still - -- take a snapshot roughly every @k@ blocks. It does mean the possibility of - -- an extra unnecessary snapshot during syncing (if the node is restarted), but - -- that is not a big deal. - blocksSinceLast >= k - - onDiskShouldTakeSnapshot (TimeSinceLast timeSinceLast) blocksSinceLast = - timeSinceLast >= snapshotInterval - || substantialAmountOfBlocksWereProcessed blocksSinceLast timeSinceLast - - -- | We want to create a snapshot after a substantial amount of blocks were - -- processed (hard-coded to 50k blocks). 
Given the fact that during bootstrap - -- a fresh node will see a lot of blocks over a short period of time, we want - -- to limit this condition to happen not more often then a fixed amount of - -- time (here hard-coded to 6 minutes) - substantialAmountOfBlocksWereProcessed blocksSinceLast timeSinceLast = - let minBlocksBeforeSnapshot = 50_000 - minTimeBeforeSnapshot = 6 * secondsToDiffTime 60 - in blocksSinceLast >= minBlocksBeforeSnapshot - && timeSinceLast >= minTimeBeforeSnapshot - - -- | Requested snapshot interval can be explicitly provided by the - -- caller (RequestedSnapshotInterval) or the caller might request the default - -- snapshot interval (DefaultSnapshotInterval). If the latter then the - -- snapshot interval is defaulted to k * 2 seconds - when @k = 2160@ the interval - -- defaults to 72 minutes. - snapshotInterval = case reqInterval of - RequestedSnapshotInterval value -> value - DefaultSnapshotInterval -> secondsToDiffTime $ fromIntegral $ k * 2 diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Args.hs new file mode 100644 index 0000000000..0b59c68a51 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Args.hs @@ -0,0 +1,76 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | Arguments for LedgerDB initialization. +module Ouroboros.Consensus.Storage.LedgerDB.Impl.Args ( + LedgerDbArgs (..) + , LedgerDbFlavorArgs (..) + , defaultArgs + ) where + +import Control.ResourceRegistry +import Control.Tracer +import Data.Kind +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.Args as V1 +import qualified Ouroboros.Consensus.Storage.LedgerDB.V2.Args as V2 +import Ouroboros.Consensus.Util.Args +import System.FS.API + +{------------------------------------------------------------------------------- + Arguments +-------------------------------------------------------------------------------} + +-- | Arguments required to initialize a LedgerDB. +type LedgerDbArgs :: + (Type -> Type) + -> (Type -> Type) + -> Type + -> Type +data LedgerDbArgs f m blk = LedgerDbArgs { + lgrSnapshotPolicyArgs :: SnapshotPolicyArgs + , lgrGenesis :: HKD f (m (ExtLedgerState blk ValuesMK)) + , lgrHasFS :: HKD f (SomeHasFS m) + , lgrConfig :: HKD f (LedgerDbCfg (ExtLedgerState blk)) + , lgrTracer :: Tracer m (TraceLedgerDBEvent blk) + , lgrFlavorArgs :: LedgerDbFlavorArgs f m + , lgrRegistry :: HKD f (ResourceRegistry m) + -- | If provided, the ledgerdb will start using said snapshot and fallback + -- to genesis. It will ignore any other existing snapshots. Useful for + -- db-analyser. 
+ , lgrStartSnapshot :: Maybe DiskSnapshot + } + +-- | Default arguments +defaultArgs :: + ( Applicative m + ) + => Incomplete LedgerDbArgs m blk +defaultArgs = LedgerDbArgs { + lgrSnapshotPolicyArgs = SnapshotPolicyArgs DefaultSnapshotInterval DefaultNumOfDiskSnapshots + , lgrGenesis = NoDefault + , lgrHasFS = NoDefault + , lgrConfig = NoDefault + , lgrTracer = nullTracer + , lgrFlavorArgs = LedgerDbFlavorArgsV2 (V2.V2Args V2.InMemoryHandleArgs) + , lgrRegistry = NoDefault + , lgrStartSnapshot = Nothing + } + +data LedgerDbFlavorArgs f m = + LedgerDbFlavorArgsV1 (V1.LedgerDbFlavorArgs f m) + | LedgerDbFlavorArgsV2 (V2.LedgerDbFlavorArgs f m) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Common.hs new file mode 100644 index 0000000000..1f89c75d30 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Common.hs @@ -0,0 +1,133 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} + +-- | Some minor stuff that is (currently) common to all implementations + +module Ouroboros.Consensus.Storage.LedgerDB.Impl.Common ( + -- * Serialise + LedgerDbSerialiseConstraints + -- * Tracing + , FlavorImplSpecificTrace (..) + , ReplayGoal (..) + , ReplayStart (..) + , TraceLedgerDBEvent (..) + , TraceReplayEvent (..) + , TraceReplayProgressEvent (..) + , TraceReplayStartEvent (..) + , decorateReplayTracerWithGoal + , decorateReplayTracerWithStart + ) where + +import Codec.Serialise (Serialise) +import Control.Tracer +import Data.Functor.Contravariant ((>$<)) +import GHC.Generics +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Inspect +import Ouroboros.Consensus.Protocol.Abstract +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as V1 +import qualified Ouroboros.Consensus.Storage.LedgerDB.V2.Args as V2 +import Ouroboros.Consensus.Storage.Serialisation + +-- | Serialization constraints required by the 'LedgerDB' to be properly +-- instantiated with a @blk@. 
+type LedgerDbSerialiseConstraints blk = + ( Serialise (HeaderHash blk) + , EncodeDisk blk (LedgerState blk EmptyMK) + , DecodeDisk blk (LedgerState blk EmptyMK) + , EncodeDisk blk (AnnTip blk) + , DecodeDisk blk (AnnTip blk) + , EncodeDisk blk (ChainDepState (BlockProtocol blk)) + , DecodeDisk blk (ChainDepState (BlockProtocol blk)) + , CanSerializeLedgerTables (LedgerState blk) + ) + +{------------------------------------------------------------------------------- + Tracing +-------------------------------------------------------------------------------} + +data FlavorImplSpecificTrace = + FlavorImplSpecificTraceV1 V1.FlavorImplSpecificTrace + | FlavorImplSpecificTraceV2 V2.FlavorImplSpecificTrace + deriving (Show, Eq) + +data TraceLedgerDBEvent blk = + LedgerDBSnapshotEvent !(TraceSnapshotEvent blk) + | LedgerReplayEvent !(TraceReplayEvent blk) + | LedgerDBForkerEvent !TraceForkerEventWithKey + | LedgerDBFlavorImplEvent !FlavorImplSpecificTrace + deriving (Generic) + +deriving instance + (StandardHash blk, InspectLedger blk) + => Show (TraceLedgerDBEvent blk) +deriving instance + (StandardHash blk, InspectLedger blk) + => Eq (TraceLedgerDBEvent blk) + +{------------------------------------------------------------------------------- + Trace replay events +-------------------------------------------------------------------------------} + +data TraceReplayEvent blk = + TraceReplayStartEvent (TraceReplayStartEvent blk) + | TraceReplayProgressEvent (TraceReplayProgressEvent blk) + deriving (Show, Eq) + +-- | Add the tip of the Immutable DB to the trace event +-- +-- Between the tip of the immutable DB and the point of the starting block, +-- the node could (if it so desired) easily compute a "percentage complete". +decorateReplayTracerWithGoal + :: Point blk -- ^ Tip of the ImmutableDB + -> Tracer m (TraceReplayProgressEvent blk) + -> Tracer m (ReplayGoal blk -> TraceReplayProgressEvent blk) +decorateReplayTracerWithGoal immTip = (($ ReplayGoal immTip) >$<) + +-- | Add the block at which a replay started. +-- +-- This allows to compute a "percentage complete" when tracing the events. +decorateReplayTracerWithStart + :: Point blk -- ^ Starting point of the replay + -> Tracer m (ReplayGoal blk -> TraceReplayProgressEvent blk) + -> Tracer m (ReplayStart blk -> ReplayGoal blk -> TraceReplayProgressEvent blk) +decorateReplayTracerWithStart start = (($ ReplayStart start) >$<) + +-- | Which point the replay started from +newtype ReplayStart blk = ReplayStart (Point blk) deriving (Eq, Show) + +-- | Which point the replay is expected to end at +newtype ReplayGoal blk = ReplayGoal (Point blk) deriving (Eq, Show) + +-- | Events traced while replaying blocks against the ledger to bring it up to +-- date w.r.t. the tip of the ImmutableDB during initialisation. As this +-- process takes a while, we trace events to inform higher layers of our +-- progress. +data TraceReplayStartEvent blk + = -- | There were no LedgerDB snapshots on disk, so we're replaying all blocks + -- starting from Genesis against the initial ledger. + ReplayFromGenesis + -- | There was a LedgerDB snapshot on disk corresponding to the given tip. + -- We're replaying more recent blocks against it. + | ReplayFromSnapshot + DiskSnapshot + (ReplayStart blk) -- ^ the block at which this replay started + deriving (Generic, Eq, Show) + +-- | We replayed the given block (reference) on the genesis snapshot during +-- the initialisation of the LedgerDB. Used during ImmutableDB replay. 
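To make the two decorators above concrete, the following sketch (illustrative only, not part of the patch) builds the fully decorated tracer and emits one progress event by partially applying the 'ReplayedBlock' constructor declared just below; this mirrors how 'replayStartingWith' uses the decorated tracer later in this patch.

-- Sketch: decorate a base tracer with the replay goal and start, then trace.
traceOneReplayStep ::
     Monad m
  => Tracer m (TraceReplayProgressEvent blk)
  -> Point blk       -- tip of the ImmutableDB (the replay goal)
  -> Point blk       -- point the replay started from
  -> RealPoint blk   -- block that was just replayed
  -> [LedgerEvent blk]
  -> m ()
traceOneReplayStep baseTracer immTip startPt rp evs =
    -- 'ReplayedBlock rp evs' still expects the start and goal arguments,
    -- which the decorated tracer supplies.
    traceWith decorated (ReplayedBlock rp evs)
  where
    decorated =
        decorateReplayTracerWithStart startPt
      $ decorateReplayTracerWithGoal immTip baseTracer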
+data TraceReplayProgressEvent blk = + ReplayedBlock + (RealPoint blk) -- ^ the block being replayed + [LedgerEvent blk] + (ReplayStart blk) -- ^ the block at which this replay started + (ReplayGoal blk) -- ^ the block at the tip of the ImmutableDB + deriving (Generic, Eq, Show) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Init.hs new file mode 100644 index 0000000000..078ad2b0ec --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Init.hs @@ -0,0 +1,308 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeFamilies #-} + +-- | Logic for initializing the LedgerDB. +-- +-- Each implementation of the LedgerDB has to provide an instantiation of +-- 'InitDB'. See 'initialize' for a description of the initialization process. +module Ouroboros.Consensus.Storage.LedgerDB.Impl.Init ( + -- * Initialization interface + InitDB (..) + -- * Initialization logic + , InitLog (..) + , openDB + , openDBInternal + -- * Testing + , initialize + ) where + +import Control.Monad (when) +import Control.Monad.Except (ExceptT, runExceptT) +import Control.Tracer +import Data.Functor.Contravariant ((>$<)) +import Data.Kind (Type) +import Data.Word +import GHC.Generics hiding (from) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Inspect +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.Block +import System.FS.API + +{------------------------------------------------------------------------------- + Initialization +-------------------------------------------------------------------------------} + +-- | Initialization log +-- +-- The initialization log records which snapshots from disk were considered, +-- in which order, and why some snapshots were rejected. It is primarily useful +-- for monitoring purposes. +data InitLog blk = + -- | Defaulted to initialization from genesis + -- + -- NOTE: Unless the blockchain is near genesis, or this is the first time we + -- boot the node, we should see this /only/ if data corruption occurred. + InitFromGenesis + + -- | Used a snapshot corresponding to the specified tip + | InitFromSnapshot DiskSnapshot (RealPoint blk) + + -- | Initialization skipped a snapshot + -- + -- We record the reason why it was skipped. + -- + -- NOTE: We should /only/ see this if data corruption occurred. 
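As a small, hypothetical example of consuming this log (not part of the patch), a monitoring layer could count how many snapshots were rejected before initialisation succeeded by peeling off the 'InitFailure' constructor declared right below:

-- Sketch: count the snapshots skipped during initialisation.
skippedSnapshots :: InitLog blk -> Int
skippedSnapshots = go 0
  where
    -- Each 'InitFailure' wraps the log of the remaining attempts.
    go n (InitFailure _ _ rest) = go (n + 1) rest
    go n _                      = n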
+ | InitFailure DiskSnapshot (SnapshotFailure blk) (InitLog blk) + deriving (Show, Eq, Generic) + +-- | Functions required to initialize a LedgerDB +type InitDB :: Type -> (Type -> Type) -> Type -> Type +data InitDB db m blk = InitDB { + initFromGenesis :: !(m db) + -- ^ Create a DB from the genesis state + , initFromSnapshot :: !(DiskSnapshot -> m (Either (SnapshotFailure blk) (db, RealPoint blk))) + -- ^ Create a DB from a Snapshot + , closeDb :: !(db -> m ()) + -- ^ Closing the database, to be reopened again with a different snapshot or + -- with the genesis state. + , initReapplyBlock :: !(LedgerDbCfg (ExtLedgerState blk) -> blk -> db -> m db) + -- ^ Reapply a block from the immutable DB when initializing the DB. + , currentTip :: !(db -> LedgerState blk EmptyMK) + -- ^ Getting the current tip for tracing the Ledger Events. + , mkLedgerDb :: !(db -> m (LedgerDB m (ExtLedgerState blk) blk, TestInternals m (ExtLedgerState blk) blk)) + -- ^ Create a LedgerDB from the initialized data structures from previous + -- steps. + } + +-- | Initialize the ledger DB from the most recent snapshot on disk +-- +-- If no such snapshot can be found, use the genesis ledger DB. Returns the +-- initialized DB as well as a log of the initialization and the number of +-- blocks replayed between the snapshot and the tip of the immutable DB. +-- +-- We do /not/ catch any exceptions thrown during streaming; should any be +-- thrown, it is the responsibility of the 'ChainDB' to catch these +-- and trigger (further) validation. We only discard snapshots if +-- +-- * We cannot deserialise them, or +-- +-- * they are /ahead/ of the chain, they refer to a slot which is later than the +-- last slot in the immutable db. +-- +-- Note that after initialization, the ledger db should be pruned so that no +-- ledger states are considered volatile. Otherwise we would be able to rollback +-- the immutable DB. +-- +-- We do /not/ attempt to use multiple ledger states from disk to construct the +-- ledger DB. Instead we load only a /single/ ledger state from disk, and +-- /compute/ all subsequent ones. This is important, because the ledger states +-- obtained in this way will (hopefully) share much of their memory footprint +-- with their predecessors. +initialize :: + forall m blk db. + ( IOLike m + , LedgerSupportsProtocol blk + , InspectLedger blk + , HasCallStack + ) + => Tracer m (TraceReplayEvent blk) + -> Tracer m (TraceSnapshotEvent blk) + -> SomeHasFS m + -> LedgerDbCfg (ExtLedgerState blk) + -> StreamAPI m blk blk + -> Point blk + -> InitDB db m blk + -> Maybe DiskSnapshot + -> m (InitLog blk, db, Word64) +initialize replayTracer + snapTracer + hasFS + cfg + stream + replayGoal + dbIface + fromSnapshot = + case fromSnapshot of + Nothing -> listSnapshots hasFS >>= tryNewestFirst id + Just snap -> tryNewestFirst id [snap] + where + InitDB {initFromGenesis, initFromSnapshot, closeDb} = dbIface + + tryNewestFirst :: (InitLog blk -> InitLog blk) + -> [DiskSnapshot] + -> m ( InitLog blk + , db + , Word64 + ) + tryNewestFirst acc [] = do + -- We're out of snapshots. 
Start at genesis + traceWith (TraceReplayStartEvent >$< replayTracer) ReplayFromGenesis + let replayTracer'' = decorateReplayTracerWithStart (Point Origin) replayTracer' + initDb <- initFromGenesis + eDB <- runExceptT $ replayStartingWith + replayTracer'' + cfg + stream + initDb + (Point Origin) + dbIface + + case eDB of + Left err -> do + closeDb initDb + error $ "Invariant violation: invalid immutable chain " <> show err + Right (db, replayed) -> do + return ( acc InitFromGenesis + , db + , replayed + ) + + tryNewestFirst acc (s:ss) = do + eInitDb <- initFromSnapshot s + case eInitDb of + Left err -> do + when (diskSnapshotIsTemporary s || err == InitFailureGenesis) $ + deleteSnapshot hasFS s + traceWith snapTracer . InvalidSnapshot s $ err + tryNewestFirst (acc . InitFailure s err) ss + Right (initDb, pt) -> do + let pt' = realPointToPoint pt + traceWith (TraceReplayStartEvent >$< replayTracer) (ReplayFromSnapshot s (ReplayStart pt')) + let replayTracer'' = decorateReplayTracerWithStart pt' replayTracer' + eDB <- runExceptT + $ replayStartingWith + replayTracer'' + cfg + stream + initDb + pt' + dbIface + case eDB of + Left err -> do + traceWith snapTracer . InvalidSnapshot s $ err + when (diskSnapshotIsTemporary s) $ deleteSnapshot hasFS s + closeDb initDb + tryNewestFirst (acc . InitFailure s err) ss + Right (db, replayed) -> do + return (acc (InitFromSnapshot s pt), db, replayed) + + replayTracer' = decorateReplayTracerWithGoal + replayGoal + (TraceReplayProgressEvent >$< replayTracer) + +-- | Replay all blocks in the Immutable database using the 'StreamAPI' provided +-- on top of the given @LedgerDB' blk@. +-- +-- It will also return the number of blocks that were replayed. +replayStartingWith :: + forall m blk db. ( + IOLike m + , LedgerSupportsProtocol blk + , InspectLedger blk + , HasCallStack + ) + => Tracer m (ReplayStart blk -> ReplayGoal blk -> TraceReplayProgressEvent blk) + -> LedgerDbCfg (ExtLedgerState blk) + -> StreamAPI m blk blk + -> db + -> Point blk + -> InitDB db m blk + -> ExceptT (SnapshotFailure blk) m (db, Word64) +replayStartingWith tracer cfg stream initDb from InitDB{initReapplyBlock, currentTip} = do + streamAll stream from + InitFailureTooRecent + (initDb, 0) + push + where + push :: blk + -> (db, Word64) + -> m (db, Word64) + push blk (!db, !replayed) = do + !db' <- initReapplyBlock cfg blk db + + let !replayed' = replayed + 1 + + events = inspectLedger + (getExtLedgerCfg (ledgerDbCfg cfg)) + (currentTip db) + (currentTip db') + + traceWith tracer (ReplayedBlock (blockRealPoint blk) events) + return (db', replayed') + +{------------------------------------------------------------------------------- + Opening a LedgerDB +-------------------------------------------------------------------------------} + +openDB :: + forall m blk db. 
( IOLike m + , LedgerSupportsProtocol blk + , InspectLedger blk + , HasCallStack + ) + => Complete LedgerDbArgs m blk + -> InitDB db m blk + -> StreamAPI m blk blk + -> Point blk + -> m (LedgerDB' m blk, Word64) +openDB args initDb stream replayGoal = + f <$> openDBInternal args initDb stream replayGoal + where f (ldb, replayCounter, _) = (ldb, replayCounter) + +-- | Open the ledger DB and expose internals for testing purposes +openDBInternal :: + ( IOLike m + , LedgerSupportsProtocol blk + , InspectLedger blk + , HasCallStack + ) + => Complete LedgerDbArgs m blk + -> InitDB db m blk + -> StreamAPI m blk blk + -> Point blk + -> m (LedgerDB' m blk, Word64, TestInternals' m blk) +openDBInternal args@(LedgerDbArgs { lgrHasFS = SomeHasFS fs }) initDb stream replayGoal = do + createDirectoryIfMissing fs True (mkFsPath []) + (_initLog, db, replayCounter) <- + initialize + replayTracer + snapTracer + lgrHasFS + lgrConfig + stream + replayGoal + initDb + lgrStartSnapshot + (ledgerDb, internal) <- mkLedgerDb initDb db + return (ledgerDb, replayCounter, internal) + + where + LedgerDbArgs { + lgrConfig + , lgrTracer + , lgrHasFS + , lgrStartSnapshot + } = args + + replayTracer = LedgerReplayEvent >$< lgrTracer + snapTracer = LedgerDBSnapshotEvent >$< lgrTracer diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Snapshots.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Snapshots.hs new file mode 100644 index 0000000000..2566b43de1 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Snapshots.hs @@ -0,0 +1,406 @@ +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeApplications #-} + +-- | Common logic and types for LedgerDB Snapshots. +-- +-- Snapshots are saved copies of Ledger states in the chain which can be used to +-- restart the node without having to replay the whole chain. Regardless of the +-- actual LedgerDB implementation chosen, the general management of snapshots is +-- common to all implementations. +module Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots ( + -- * Snapshots + DiskSnapshot (..) + , NumOfDiskSnapshots (..) + , SnapshotFailure (..) + , SnapshotPolicyArgs (..) + -- * Codec + , readExtLedgerState + , writeExtLedgerState + -- * Paths + , diskSnapshotIsTemporary + , snapshotFromPath + , snapshotToDirName + , snapshotToDirPath + -- * Management + , deleteSnapshot + , listSnapshots + , trimSnapshots + -- * Policy + , SnapshotInterval (..) + , SnapshotPolicy (..) + , defaultSnapshotPolicy + -- * Tracing + , TraceSnapshotEvent (..) 
+ -- * Testing + , decodeLBackwardsCompatible + , encodeL + ) where + +import Codec.CBOR.Decoding +import Codec.CBOR.Encoding +import qualified Codec.CBOR.Write as CBOR +import qualified Codec.Serialise.Decoding as Dec +import Control.Monad +import Control.Monad.Class.MonadTime.SI +import Control.Monad.Except +import Control.Tracer +import qualified Data.List as List +import Data.Maybe (isJust, mapMaybe) +import Data.Ord +import Data.Set (Set) +import qualified Data.Set as Set +import Data.Time.Clock (secondsToDiffTime) +import Data.Word +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.CBOR (ReadIncrementalErr, + decodeWithOrigin, readIncremental) +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Util.Versioned +import System.FS.API +import System.FS.API.Lazy (hPut) +import Text.Read (readMaybe) + +data DiskSnapshot = DiskSnapshot { + -- | Snapshots are numbered. We will try the snapshots with the highest + -- number first. + -- + -- When creating a snapshot, we use the slot number of the ledger state it + -- corresponds to as the snapshot number. This gives an indication of how + -- recent the snapshot is. + -- + -- Note that the snapshot names are only indicative, we don't rely on the + -- snapshot number matching the slot number of the corresponding ledger + -- state. We only use the snapshots numbers to determine the order in + -- which we try them. + dsNumber :: Word64 + + -- | Snapshots can optionally have a suffix, separated by the snapshot + -- number with an underscore, e.g., @4492799_last_Byron@. This suffix acts + -- as metadata for the operator of the node. Snapshots with a suffix will + -- /not be trimmed/. + , dsSuffix :: Maybe String + } + deriving (Show, Eq, Ord, Generic) + +data SnapshotFailure blk = + -- | We failed to deserialise the snapshot + -- + -- This can happen due to data corruption in the ledger DB. + InitFailureRead ReadIncrementalErr + + -- | This snapshot is too recent (ahead of the tip of the immutable chain) + | InitFailureTooRecent (RealPoint blk) + + -- | This snapshot was of the ledger state at genesis, even though we never + -- take snapshots at genesis, so this is unexpected. + | InitFailureGenesis + deriving (Show, Eq, Generic) + +-- | Named snapshot are permanent, they will never be deleted even if failing to +-- deserialize. +diskSnapshotIsPermanent :: DiskSnapshot -> Bool +diskSnapshotIsPermanent = isJust . dsSuffix + +-- | The snapshots that are periodically created are temporary, they will be +-- deleted when trimming or if they fail to deserialize. +diskSnapshotIsTemporary :: DiskSnapshot -> Bool +diskSnapshotIsTemporary = not . diskSnapshotIsPermanent + +snapshotFromPath :: String -> Maybe DiskSnapshot +snapshotFromPath fileName = do + number <- readMaybe prefix + return $ DiskSnapshot number suffix' + where + (prefix, suffix) = break (== '_') fileName + + suffix' :: Maybe String + suffix' = case suffix of + "" -> Nothing + _ : str -> Just str + +-- | List on-disk snapshots, highest number first. +listSnapshots :: Monad m => SomeHasFS m -> m [DiskSnapshot] +listSnapshots (SomeHasFS HasFS{listDirectory}) = + aux <$> listDirectory (mkFsPath []) + where + aux :: Set String -> [DiskSnapshot] + aux = List.sortOn (Down . dsNumber) . mapMaybe snapshotFromPath . 
Set.toList + +-- | Delete snapshot from disk +deleteSnapshot :: (Monad m, HasCallStack) => SomeHasFS m -> DiskSnapshot -> m () +deleteSnapshot (SomeHasFS HasFS{doesDirectoryExist, removeDirectoryRecursive}) ss = do + let p = snapshotToDirPath ss + exists <- doesDirectoryExist p + when exists (removeDirectoryRecursive p) + +-- | Read an extended ledger state from disk +readExtLedgerState :: + forall m blk. IOLike m + => SomeHasFS m + -> (forall s. Decoder s (ExtLedgerState blk EmptyMK)) + -> (forall s. Decoder s (HeaderHash blk)) + -> FsPath + -> ExceptT ReadIncrementalErr m (ExtLedgerState blk EmptyMK) +readExtLedgerState hasFS decLedger decHash = do + ExceptT + . readIncremental hasFS decoder + where + decoder :: Decoder s (ExtLedgerState blk EmptyMK) + decoder = decodeLBackwardsCompatible (Proxy @blk) decLedger decHash + +-- | Write an extended ledger state to disk +writeExtLedgerState :: + forall m blk. MonadThrow m + => SomeHasFS m + -> (ExtLedgerState blk EmptyMK -> Encoding) + -> FsPath + -> ExtLedgerState blk EmptyMK + -> m () +writeExtLedgerState (SomeHasFS hasFS) encLedger path cs = do + withFile hasFS path (WriteMode MustBeNew) $ \h -> + void $ hPut hasFS h $ CBOR.toBuilder (encoder cs) + where + encoder :: ExtLedgerState blk EmptyMK -> Encoding + encoder = encodeL encLedger + +-- | Trim the number of on disk snapshots so that at most 'onDiskNumSnapshots' +-- snapshots are stored on disk. The oldest snapshots are deleted. +-- +-- The deleted snapshots are returned. +trimSnapshots :: + Monad m + => Tracer m (TraceSnapshotEvent r) + -> SomeHasFS m + -> SnapshotPolicy + -> m [DiskSnapshot] +trimSnapshots tracer fs SnapshotPolicy{onDiskNumSnapshots} = do + -- We only trim temporary snapshots + ss <- filter diskSnapshotIsTemporary <$> listSnapshots fs + -- The snapshot are most recent first, so we can simply drop from the + -- front to get the snapshots that are "too" old. + let ssTooOld = drop (fromIntegral onDiskNumSnapshots) ss + mapM + (\s -> do + deleteSnapshot fs s + traceWith tracer $ DeletedSnapshot s + pure s + ) + ssTooOld + +snapshotToDirName :: DiskSnapshot -> String +snapshotToDirName DiskSnapshot { dsNumber, dsSuffix } = + show dsNumber <> suffix + where + suffix = case dsSuffix of + Nothing -> "" + Just s -> "_" <> s + +-- | The path within the LedgerDB's filesystem to the snapshot's directory +snapshotToDirPath :: DiskSnapshot -> FsPath +snapshotToDirPath = mkFsPath . (:[]) . snapshotToDirName + +-- | Version 1: uses versioning ('Ouroboros.Consensus.Util.Versioned') and only +-- encodes the ledger state @l@. +snapshotEncodingVersion1 :: VersionNumber +snapshotEncodingVersion1 = 1 + +-- | Encoder to be used in combination with 'decodeSnapshotBackwardsCompatible'. +encodeL :: (l -> Encoding) -> l -> Encoding +encodeL encodeLedger l = + encodeVersion snapshotEncodingVersion1 (encodeLedger l) + +-- | To remain backwards compatible with existing snapshots stored on disk, we +-- must accept the old format as well as the new format. +-- +-- The old format: +-- +-- * The tip: @WithOrigin (RealPoint blk)@ +-- +-- * The chain length: @Word64@ +-- +-- * The ledger state: @l@ +-- +-- The new format is described by 'snapshotEncodingVersion1'. +-- +-- This decoder will accept and ignore them. The encoder ('encodeSnapshot') will +-- no longer encode them. +decodeLBackwardsCompatible :: + forall l blk. + Proxy blk + -> (forall s. Decoder s l) + -> (forall s. Decoder s (HeaderHash blk)) + -> forall s. 
Decoder s l +decodeLBackwardsCompatible _ decodeLedger decodeHash = + decodeVersionWithHook + decodeOldFormat + [(snapshotEncodingVersion1, Decode decodeVersion1)] + where + decodeVersion1 :: forall s. Decoder s l + decodeVersion1 = decodeLedger + + decodeOldFormat :: Maybe Int -> forall s. Decoder s l + decodeOldFormat (Just 3) = do + _ <- withOriginRealPointToPoint <$> + decodeWithOrigin (decodeRealPoint @blk decodeHash) + _ <- Dec.decodeWord64 + decodeLedger + decodeOldFormat mbListLen = + fail $ + "decodeSnapshotBackwardsCompatible: invalid start " <> + show mbListLen + +{------------------------------------------------------------------------------- + Policy +-------------------------------------------------------------------------------} + +-- | Length of time that has to pass after which a snapshot is taken. +data SnapshotInterval = + DefaultSnapshotInterval + | RequestedSnapshotInterval DiffTime + | DisableSnapshots + deriving stock (Eq, Generic, Show) + +-- | Number of snapshots to be stored on disk. This is either the default value +-- as determined by the 'SnapshotPolicy', or it is provided by the user. See the +-- 'SnapshotPolicy' documentation for more information. +data NumOfDiskSnapshots = + DefaultNumOfDiskSnapshots + | RequestedNumOfDiskSnapshots Word + deriving stock (Eq, Generic, Show) + +-- | Snapshot policy +-- +-- We only write ledger states that are older than @k@ blocks to disk (that is, +-- snapshots that are guaranteed valid). The on-disk policy determines how often +-- we write to disk and how many checkpoints we keep. +data SnapshotPolicy = SnapshotPolicy { + -- | How many snapshots do we want to keep on disk? + -- + -- A higher number of on-disk snapshots is primarily a safe-guard against + -- disk corruption: it trades disk space for reliability. + -- + -- Examples: + -- + -- * @0@: Delete the snapshot immediately after writing. + -- Probably not a useful value :-D + -- * @1@: Delete the previous snapshot immediately after writing the next + -- Dangerous policy: if for some reason the deletion happens before + -- the new snapshot is written entirely to disk (we don't @fsync@), + -- we have no choice but to start at the genesis snapshot on the + -- next startup. + -- * @2@: Always keep 2 snapshots around. This means that when we write + -- the next snapshot, we delete the oldest one, leaving the middle + -- one available in case of truncation of the write. This is + -- probably a sane value in most circumstances. + onDiskNumSnapshots :: Word + + -- | Should we write a snapshot of the ledger state to disk? + -- + -- This function is passed two bits of information: + -- + -- * The time since the last snapshot, or 'Nothing' if none was taken yet. + -- Note that 'Nothing' merely means no snapshot has been taken yet + -- since the node was started; it does not necessarily mean that none + -- exist on disk. + -- + -- * The distance in terms of blocks applied to the /oldest/ ledger + -- snapshot in memory. During normal operation, this is the number of + -- blocks written to the ImmutableDB since the last snapshot. On + -- startup, it is computed by counting how many immutable blocks we had + -- to reapply to get to the chain tip. This is useful, as it allows the + -- policy to decide to take a snapshot /on node startup/ if a lot of + -- blocks had to be replayed.
+ -- + -- See also 'defaultSnapshotPolicy' + , onDiskShouldTakeSnapshot :: Maybe DiffTime -> Word64 -> Bool + } + deriving NoThunks via OnlyCheckWhnf SnapshotPolicy + +data SnapshotPolicyArgs = SnapshotPolicyArgs { + spaInterval :: SnapshotInterval + , spaNum :: NumOfDiskSnapshots + } + +-- | Default on-disk policy suitable for use with cardano-node +-- +defaultSnapshotPolicy :: + SecurityParam + -> SnapshotPolicyArgs + -> SnapshotPolicy +defaultSnapshotPolicy + (SecurityParam k) + (SnapshotPolicyArgs requestedInterval reqNumOfSnapshots) = + SnapshotPolicy { + onDiskNumSnapshots + , onDiskShouldTakeSnapshot + } + where + onDiskNumSnapshots :: Word + onDiskNumSnapshots = case reqNumOfSnapshots of + DefaultNumOfDiskSnapshots -> 2 + RequestedNumOfDiskSnapshots value -> value + + onDiskShouldTakeSnapshot :: + Maybe DiffTime + -> Word64 + -> Bool + onDiskShouldTakeSnapshot Nothing blocksSinceLast = + -- If users never leave their wallet running for long, this would mean + -- that under some circumstances we would never take a snapshot. + -- So, on startup (when the 'time since the last snapshot' is `Nothing`), + -- we take a snapshot as soon as there are @k@ blocks replayed. + -- This means that even if users frequently shut down their wallet, we still + -- take a snapshot roughly every @k@ blocks. It does mean the possibility of + -- an extra unnecessary snapshot during syncing (if the node is restarted), but + -- that is not a big deal. + blocksSinceLast >= k + + onDiskShouldTakeSnapshot (Just timeSinceLast) blocksSinceLast = + snapshotInterval timeSinceLast + || substantialAmountOfBlocksWereProcessed blocksSinceLast timeSinceLast + + -- | We want to create a snapshot after a substantial number of blocks were + -- processed (hard-coded to 50k blocks). Given the fact that during bootstrap + -- a fresh node will see a lot of blocks over a short period of time, we want + -- to limit this condition to happen no more often than a fixed amount of + -- time (here hard-coded to 6 minutes) + substantialAmountOfBlocksWereProcessed blocksSinceLast timeSinceLast = + let minBlocksBeforeSnapshot = 50_000 + minTimeBeforeSnapshot = 6 * secondsToDiffTime 60 + in blocksSinceLast >= minBlocksBeforeSnapshot + && timeSinceLast >= minTimeBeforeSnapshot + + -- | Requested snapshot interval can be explicitly provided by the + -- caller (RequestedSnapshotInterval) or the caller might request the default + -- snapshot interval (DefaultSnapshotInterval). If the latter, then the + -- snapshot interval is defaulted to k * 2 seconds - when @k = 2160@ the interval + -- defaults to 72 minutes. + snapshotInterval t = case requestedInterval of + RequestedSnapshotInterval value -> t >= value + DefaultSnapshotInterval -> t >= secondsToDiffTime (fromIntegral $ k * 2) + DisableSnapshots -> False + +{------------------------------------------------------------------------------- + Tracing snapshot events +-------------------------------------------------------------------------------} + +data TraceSnapshotEvent blk + = InvalidSnapshot DiskSnapshot (SnapshotFailure blk) + -- ^ An on-disk snapshot was skipped because it was invalid. + | TookSnapshot DiskSnapshot (RealPoint blk) + -- ^ A snapshot was written to disk.
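As a concrete reading of 'defaultSnapshotPolicy' above, the following sketch (illustrative, not part of the patch) asks whether a snapshot should be taken when 30 minutes and 10000 blocks have passed since the last one, with @k = 2160@; since neither the default interval of @2 * k@ seconds (72 minutes) nor the 50k-block threshold is reached, this evaluates to 'False'.

-- Sketch: query the default policy for a hypothetical node state.
shouldSnapshotNow :: Bool
shouldSnapshotNow =
    onDiskShouldTakeSnapshot policy (Just thirtyMinutes) 10000
  where
    policy = defaultSnapshotPolicy
               (SecurityParam 2160)
               (SnapshotPolicyArgs DefaultSnapshotInterval DefaultNumOfDiskSnapshots)
    thirtyMinutes = secondsToDiffTime (30 * 60)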
+ | DeletedSnapshot DiskSnapshot + -- ^ An old or invalid on-disk snapshot was deleted + deriving (Generic, Eq, Show) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Validate.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Validate.hs new file mode 100644 index 0000000000..ad0d16b071 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Impl/Validate.hs @@ -0,0 +1,301 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE FunctionalDependencies #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +module Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate ( + -- * Find blocks + ResolveBlock + , ResolvesBlocks (..) + -- * Validation + , ValidLedgerState (..) + , validate + -- * Testing + , defaultResolveWithErrors + , defaultThrowLedgerErrors + ) where + +import Control.Monad (void) +import Control.Monad.Base +import Control.Monad.Except (ExceptT (..), MonadError (..), runExcept, + runExceptT) +import Control.Monad.Reader (ReaderT (..)) +import Control.Monad.Trans (MonadTrans (..)) +import Control.ResourceRegistry +import Data.Kind +import Data.Set (Set) +import qualified Data.Set as Set +import Data.Word +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache +import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache as BlockCache +import Ouroboros.Consensus.Storage.LedgerDB.API hiding (validate) +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike + +{------------------------------------------------------------------------------- + Validation +-------------------------------------------------------------------------------} + +validate :: + forall m blk. ( + IOLike m + , LedgerSupportsProtocol blk + , HasCallStack + , MonadBase m m + ) + => ResolveBlock m blk + -> TopLevelConfig blk + -> ([RealPoint blk] -> STM m ()) + -> STM m (Set (RealPoint blk)) + -> (ResourceRegistry m -> Word64 -> m (Either ExceededRollback (Forker' m blk))) + -> ResourceRegistry m + -> (TraceValidateEvent blk -> m ()) + -> BlockCache blk + -> Word64 -- ^ How many blocks to roll back + -> [Header blk] + -> m (ValidateResult' m blk) +validate resolve config addPrevApplied prevApplied forkerAtFromTip rr trace blockCache numRollbacks hdrs = do + aps <- mkAps <$> atomically prevApplied + res <- fmap rewrap $ defaultResolveWithErrors resolve $ + switch + forkerAtFromTip + rr + (ExtLedgerCfg config) + numRollbacks + (lift . lift . trace) + aps + liftBase $ atomically $ addPrevApplied (validBlockPoints res (map headerRealPoint hdrs)) + return res + where + rewrap :: Either (AnnLedgerError' n blk) (Either ExceededRollback (Forker' n blk)) + -> ValidateResult' n blk + rewrap (Left e) = ValidateLedgerError e + rewrap (Right (Left e)) = ValidateExceededRollBack e + rewrap (Right (Right l)) = ValidateSuccessful l + + mkAps :: forall bn n l. 
l ~ ExtLedgerState blk + => Set (RealPoint blk) + -> [Ap bn n l blk ( ResolvesBlocks n blk + , ThrowsLedgerError bn n l blk + )] + mkAps prev = + [ case ( Set.member (headerRealPoint hdr) prev + , BlockCache.lookup (headerHash hdr) blockCache + ) of + (False, Nothing) -> ApplyRef (headerRealPoint hdr) + (True, Nothing) -> Weaken $ ReapplyRef (headerRealPoint hdr) + (False, Just blk) -> Weaken $ ApplyVal blk + (True, Just blk) -> Weaken $ ReapplyVal blk + | hdr <- hdrs + ] + + -- | Based on the 'ValidateResult', return the hashes corresponding to + -- valid blocks. + validBlockPoints :: forall n. ValidateResult' n blk -> [RealPoint blk] -> [RealPoint blk] + validBlockPoints = \case + ValidateExceededRollBack _ -> const [] + ValidateSuccessful _ -> id + ValidateLedgerError e -> takeWhile (/= annLedgerErrRef e) + +-- | Switch to a fork by rolling back a number of blocks and then pushing the +-- new blocks. +switch :: + (ApplyBlock l blk, MonadBase bm m, c, MonadSTM bm) + => (ResourceRegistry bm -> Word64 -> bm (Either ExceededRollback (Forker bm l blk))) + -> ResourceRegistry bm + -> LedgerCfg l + -> Word64 -- ^ How many blocks to roll back + -> (TraceValidateEvent blk -> m ()) + -> [Ap bm m l blk c] -- ^ New blocks to apply + -> m (Either ExceededRollback (Forker bm l blk)) +switch forkerAtFromTip rr cfg numRollbacks trace newBlocks = do + foEith <- liftBase $ forkerAtFromTip rr numRollbacks + case foEith of + Left rbExceeded -> pure $ Left rbExceeded + Right fo -> do + case newBlocks of + [] -> pure () + -- no blocks to apply to ledger state, return the forker + (firstBlock:_) -> do + let start = PushStart . toRealPoint $ firstBlock + goal = PushGoal . toRealPoint . last $ newBlocks + void $ applyThenPushMany + (trace . StartedPushingBlockToTheLedgerDb start goal) + cfg + newBlocks + fo + pure $ Right fo + +{------------------------------------------------------------------------------- + Apply blocks +-------------------------------------------------------------------------------} + +newtype ValidLedgerState l = ValidLedgerState { getValidLedgerState :: l } + +-- | 'Ap' is used to pass information about blocks to ledger DB updates +-- +-- The constructors serve two purposes: +-- +-- * Specify the various parameters +-- +-- 1. Are we passing the block by value or by reference? +-- +-- 2. Are we applying or reapplying the block? +-- +-- * Compute the constraint @c@ on the monad @m@ in order to run the query: +-- +-- 1. If we are passing a block by reference, we must be able to resolve it. +-- +-- 2. If we are applying rather than reapplying, we might have ledger errors. +type Ap :: (Type -> Type) -> (Type -> Type) -> LedgerStateKind -> Type -> Constraint -> Type +data Ap bm m l blk c where + ReapplyVal :: blk -> Ap bm m l blk () + ApplyVal :: blk -> Ap bm m l blk ( ThrowsLedgerError bm m l blk ) + ReapplyRef :: RealPoint blk -> Ap bm m l blk ( ResolvesBlocks m blk ) + ApplyRef :: RealPoint blk -> Ap bm m l blk ( ResolvesBlocks m blk + , ThrowsLedgerError bm m l blk ) + + -- | 'Weaken' increases the constraint on the monad @m@. + -- + -- This is primarily useful when combining multiple 'Ap's in a single + -- homogeneous structure. 
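A hypothetical illustration (not part of the patch), analogous to the 'mkAps' helper above: 'Weaken', declared immediately below, lets a block passed by value (which needs no extra constraints) live in the same list as a block passed by reference, which needs both resolution and error reporting.

-- Sketch: mixing differently constrained 'Ap's in one list via 'Weaken'.
mixedAps ::
     blk
  -> RealPoint blk
  -> [ Ap bm m (ExtLedgerState blk) blk
         ( ResolvesBlocks m blk
         , ThrowsLedgerError bm m (ExtLedgerState blk) blk
         ) ]
mixedAps blkByValue ptByRef =
    [ Weaken (ReapplyVal blkByValue)  -- no constraints needed, weakened to fit
    , ApplyRef ptByRef                -- needs both constraints
    ]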
+ Weaken :: (c' => c) => Ap bm m l blk c -> Ap bm m l blk c' + +toRealPoint :: HasHeader blk => Ap bm m l blk c -> RealPoint blk +toRealPoint (ReapplyVal blk) = blockRealPoint blk +toRealPoint (ApplyVal blk) = blockRealPoint blk +toRealPoint (ReapplyRef rp) = rp +toRealPoint (ApplyRef rp) = rp +toRealPoint (Weaken ap) = toRealPoint ap + +-- | Apply a block to the given forker +applyBlock :: forall m bm c l blk. (ApplyBlock l blk, MonadBase bm m, c, MonadSTM bm) + => LedgerCfg l + -> Ap bm m l blk c + -> Forker bm l blk + -> m (ValidLedgerState (l DiffMK)) +applyBlock cfg ap fo = case ap of + ReapplyVal b -> + ValidLedgerState + <$> withValues b (return . tickThenReapply cfg b) + ApplyVal b -> + ValidLedgerState + <$> withValues b + ( either (throwLedgerError fo (blockRealPoint b)) return + . runExcept + . tickThenApply cfg b + ) + ReapplyRef r -> do + b <- doResolveBlock r + applyBlock cfg (ReapplyVal b) fo + ApplyRef r -> do + b <- doResolveBlock r + applyBlock cfg (ApplyVal b) fo + Weaken ap' -> + applyBlock cfg ap' fo + where + withValues :: blk -> (l ValuesMK -> m (l DiffMK)) -> m (l DiffMK) + withValues blk f = do + l <- liftBase $ atomically $ forkerGetLedgerState fo + vs <- withLedgerTables l + <$> liftBase (forkerReadTables fo (getBlockKeySets blk)) + f vs + +-- | If applying a block on top of the ledger state at the tip is successful, +-- push the resulting ledger state to the forker. +-- +-- Note that we require @c@ (from the particular choice of @Ap bm m l blk c@) so +-- this can sometimes throw ledger errors. +applyThenPush :: (ApplyBlock l blk, MonadBase bm m, c, MonadSTM bm) + => LedgerCfg l + -> Ap bm m l blk c + -> Forker bm l blk + -> m () +applyThenPush cfg ap fo = + liftBase . forkerPush fo . getValidLedgerState =<< + applyBlock cfg ap fo + +-- | Apply and push a sequence of blocks (oldest first). +applyThenPushMany :: (ApplyBlock l blk, MonadBase bm m, c, MonadSTM bm) + => (Pushing blk -> m ()) + -> LedgerCfg l + -> [Ap bm m l blk c] + -> Forker bm l blk + -> m () +applyThenPushMany trace cfg aps fo = mapM_ pushAndTrace aps + where + pushAndTrace ap = do + trace $ Pushing . toRealPoint $ ap + applyThenPush cfg ap fo + +{------------------------------------------------------------------------------- + Annotated ledger errors +-------------------------------------------------------------------------------} + +class Monad m => ThrowsLedgerError bm m l blk where + throwLedgerError :: Forker bm l blk -> RealPoint blk -> LedgerErr l -> m a + +instance Monad m => ThrowsLedgerError bm (ExceptT (AnnLedgerError bm l blk) m) l blk where + throwLedgerError f l r = throwError $ AnnLedgerError f l r + +defaultThrowLedgerErrors :: ExceptT (AnnLedgerError bm l blk) m a + -> m (Either (AnnLedgerError bm l blk) a) +defaultThrowLedgerErrors = runExceptT + +defaultResolveWithErrors :: ResolveBlock m blk + -> ExceptT (AnnLedgerError bm l blk) + (ReaderT (ResolveBlock m blk) m) + a + -> m (Either (AnnLedgerError bm l blk) a) +defaultResolveWithErrors resolve = + defaultResolveBlocks resolve + . defaultThrowLedgerErrors + +{------------------------------------------------------------------------------- + Finding blocks +-------------------------------------------------------------------------------} + +-- | Resolve a block +-- +-- Resolving a block reference to the actual block lives in @m@ because +-- it might need to read the block from disk (and can therefore not be +-- done inside an STM transaction). +-- +-- NOTE: The ledger DB will only ask the 'ChainDB' for blocks it knows +-- must exist.
If the 'ChainDB' is unable to fulfill the request, data +-- corruption must have happened and the 'ChainDB' should trigger +-- validation mode. +type ResolveBlock m blk = RealPoint blk -> m blk + +-- | Monads in which we can resolve blocks +-- +-- To guide type inference, we insist that we must be able to infer the type +-- of the block we are resolving from the type of the monad. +class Monad m => ResolvesBlocks m blk | m -> blk where + doResolveBlock :: ResolveBlock m blk + +instance Monad m => ResolvesBlocks (ReaderT (ResolveBlock m blk) m) blk where + doResolveBlock r = ReaderT $ \f -> f r + +defaultResolveBlocks :: ResolveBlock m blk + -> ReaderT (ResolveBlock m blk) m a + -> m a +defaultResolveBlocks = flip runReaderT + +-- Quite a specific instance so we can satisfy the fundep +instance Monad m + => ResolvesBlocks (ExceptT e (ReaderT (ResolveBlock m blk) m)) blk where + doResolveBlock = lift . doResolveBlock diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Init.hs deleted file mode 100644 index 3c4245b74e..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Init.hs +++ /dev/null @@ -1,277 +0,0 @@ -{-# LANGUAGE BangPatterns #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE ScopedTypeVariables #-} - --- | LedgerDB initialization either from a LedgerState or from a DiskSnapshot -module Ouroboros.Consensus.Storage.LedgerDB.Init ( - -- * Initialization - InitLog (..) - , ReplayStart (..) - , initLedgerDB - -- * Trace - , ReplayGoal (..) - , TraceReplayEvent (..) - , decorateReplayTracerWithGoal - , decorateReplayTracerWithStart - ) where - -import Codec.Serialise.Decoding (Decoder) -import Control.Monad (when) -import Control.Monad.Except (ExceptT, runExceptT, throwError, - withExceptT) -import Control.Monad.Trans.Class (lift) -import Control.Tracer -import Data.Word -import GHC.Generics (Generic) -import GHC.Stack -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Ledger.Inspect -import Ouroboros.Consensus.Ledger.SupportsProtocol -import Ouroboros.Consensus.Storage.ImmutableDB.Stream -import Ouroboros.Consensus.Storage.LedgerDB.LedgerDB -import Ouroboros.Consensus.Storage.LedgerDB.Query -import Ouroboros.Consensus.Storage.LedgerDB.Snapshots -import Ouroboros.Consensus.Storage.LedgerDB.Update -import Ouroboros.Consensus.Util.IOLike -import Ouroboros.Network.Block (Point (Point)) -import System.FS.API - -{------------------------------------------------------------------------------- - Initialize the DB --------------------------------------------------------------------------------} - --- | Initialization log --- --- The initialization log records which snapshots from disk were considered, --- in which order, and why some snapshots were rejected. It is primarily useful --- for monitoring purposes. -data InitLog blk = - -- | Defaulted to initialization from genesis - -- - -- NOTE: Unless the blockchain is near genesis, we should see this /only/ - -- if data corrupted occurred. - InitFromGenesis - - -- | Used a snapshot corresponding to the specified tip - | InitFromSnapshot DiskSnapshot (RealPoint blk) - - -- | Initialization skipped a snapshot - -- - -- We record the reason why it was skipped. - -- - -- NOTE: We should /only/ see this if data corrupted occurred. 
- | InitFailure DiskSnapshot (SnapshotFailure blk) (InitLog blk) - deriving (Show, Eq, Generic) - --- | Initialize the ledger DB from the most recent snapshot on disk --- --- If no such snapshot can be found, use the genesis ledger DB. Returns the --- initialized DB as well as the block reference corresponding to the snapshot --- we found on disk (the latter primarily for testing/monitoring purposes). --- --- We do /not/ catch any exceptions thrown during streaming; should any be --- thrown, it is the responsibility of the 'ChainDB' to catch these --- and trigger (further) validation. We only discard snapshots if --- --- * We cannot deserialise them, or --- * they are /ahead/ of the chain --- --- It is possible that the Ledger DB will not be able to roll back @k@ blocks --- after initialization if the chain has been truncated (data corruption). --- --- We do /not/ attempt to use multiple ledger states from disk to construct the --- ledger DB. Instead we load only a /single/ ledger state from disk, and --- /compute/ all subsequent ones. This is important, because the ledger states --- obtained in this way will (hopefully) share much of their memory footprint --- with their predecessors. -initLedgerDB :: - forall m blk. ( - IOLike m - , LedgerSupportsProtocol blk - , InspectLedger blk - , HasCallStack - ) - => Tracer m (ReplayGoal blk -> TraceReplayEvent blk) - -> Tracer m (TraceSnapshotEvent blk) - -> SomeHasFS m - -> (forall s. Decoder s (ExtLedgerState blk)) - -> (forall s. Decoder s (HeaderHash blk)) - -> LedgerDbCfg (ExtLedgerState blk) - -> m (ExtLedgerState blk) -- ^ Genesis ledger state - -> StreamAPI m blk blk - -> m (InitLog blk, LedgerDB' blk, Word64) -initLedgerDB replayTracer - tracer - hasFS - decLedger - decHash - cfg - getGenesisLedger - stream = do - snapshots <- listSnapshots hasFS - tryNewestFirst id snapshots - where - tryNewestFirst :: (InitLog blk -> InitLog blk) - -> [DiskSnapshot] - -> m (InitLog blk, LedgerDB' blk, Word64) - tryNewestFirst acc [] = do - -- We're out of snapshots. Start at genesis - traceWith replayTracer ReplayFromGenesis - initDb <- ledgerDbWithAnchor <$> getGenesisLedger - let replayTracer' = decorateReplayTracerWithStart (Point Origin) replayTracer - ml <- runExceptT $ initStartingWith replayTracer' cfg stream initDb - case ml of - Left _ -> error "invariant violation: invalid current chain" - Right (l, replayed) -> return (acc InitFromGenesis, l, replayed) - tryNewestFirst acc (s:ss) = do - -- If we fail to use this snapshot, delete it and try an older one - ml <- runExceptT $ initFromSnapshot - replayTracer - hasFS - decLedger - decHash - cfg - stream - s - case ml of - Left err -> do - when (diskSnapshotIsTemporary s) $ - -- We don't delete permanent snapshots, even if we couldn't parse - -- them - deleteSnapshot hasFS s - traceWith tracer $ InvalidSnapshot s err - tryNewestFirst (acc . InitFailure s err) ss - Right (r, l, replayed) -> - return (acc (InitFromSnapshot s r), l, replayed) - -{------------------------------------------------------------------------------- - Internal: initialize using the given snapshot --------------------------------------------------------------------------------} - --- | Attempt to initialize the ledger DB from the given snapshot --- --- If the chain DB or ledger layer reports an error, the whole thing is aborted --- and an error is returned. This should not throw any errors itself (ignoring --- unexpected exceptions such as asynchronous exceptions, of course). -initFromSnapshot :: - forall m blk. 
( - IOLike m - , LedgerSupportsProtocol blk - , InspectLedger blk - , HasCallStack - ) - => Tracer m (ReplayGoal blk -> TraceReplayEvent blk) - -> SomeHasFS m - -> (forall s. Decoder s (ExtLedgerState blk)) - -> (forall s. Decoder s (HeaderHash blk)) - -> LedgerDbCfg (ExtLedgerState blk) - -> StreamAPI m blk blk - -> DiskSnapshot - -> ExceptT (SnapshotFailure blk) m (RealPoint blk, LedgerDB' blk, Word64) -initFromSnapshot tracer hasFS decLedger decHash cfg stream ss = do - initSS <- withExceptT InitFailureRead $ - readSnapshot hasFS decLedger decHash ss - let replayStart = castPoint $ getTip initSS - case pointToWithOriginRealPoint replayStart of - Origin -> throwError InitFailureGenesis - NotOrigin realReplayStart -> do - let tracer' = decorateReplayTracerWithStart replayStart tracer - lift $ traceWith tracer' $ ReplayFromSnapshot ss - (initDB, replayed) <- - initStartingWith - tracer' - cfg - stream - (ledgerDbWithAnchor initSS) - return (realReplayStart, initDB, replayed) - --- | Attempt to initialize the ledger DB starting from the given ledger DB -initStartingWith :: - forall m blk. ( - Monad m - , LedgerSupportsProtocol blk - , InspectLedger blk - , HasCallStack - ) - => Tracer m (ReplayStart blk -> ReplayGoal blk -> TraceReplayEvent blk) - -> LedgerDbCfg (ExtLedgerState blk) - -> StreamAPI m blk blk - -> LedgerDB' blk - -> ExceptT (SnapshotFailure blk) m (LedgerDB' blk, Word64) -initStartingWith tracer cfg stream initDb = do - streamAll stream (castPoint (ledgerDbTip initDb)) - InitFailureTooRecent - (initDb, 0) - push - where - push :: blk -> (LedgerDB' blk, Word64) -> m (LedgerDB' blk, Word64) - push blk !(!db, !replayed) = do - !db' <- ledgerDbPush cfg (ReapplyVal blk) db - - let replayed' :: Word64 - !replayed' = replayed + 1 - - events :: [LedgerEvent blk] - events = inspectLedger - (getExtLedgerCfg (ledgerDbCfg cfg)) - (ledgerState (ledgerDbCurrent db)) - (ledgerState (ledgerDbCurrent db')) - - traceWith tracer (ReplayedBlock (blockRealPoint blk) events) - return (db', replayed') - -{------------------------------------------------------------------------------- - Trace events --------------------------------------------------------------------------------} - --- | Add the tip of the Immutable DB to the trace event --- --- Between the tip of the immutable DB and the point of the starting block, --- the node could (if it so desired) easily compute a "percentage complete". -decorateReplayTracerWithGoal :: - Point blk -- ^ Tip of the ImmutableDB - -> Tracer m (TraceReplayEvent blk) - -> Tracer m (ReplayGoal blk -> TraceReplayEvent blk) -decorateReplayTracerWithGoal immTip = contramap ($ (ReplayGoal immTip)) - --- | Add the block at which a replay started. --- --- This allows to compute a "percentage complete" when tracing the events. -decorateReplayTracerWithStart :: - Point blk -- ^ Starting point of the replay - -> Tracer m (ReplayGoal blk -> TraceReplayEvent blk) - -> Tracer m (ReplayStart blk -> ReplayGoal blk -> TraceReplayEvent blk) -decorateReplayTracerWithStart start = contramap ($ (ReplayStart start)) - --- | Which point the replay started from -newtype ReplayStart blk = ReplayStart (Point blk) deriving (Eq, Show) - --- | Which point the replay is expected to end at -newtype ReplayGoal blk = ReplayGoal (Point blk) deriving (Eq, Show) - --- | Events traced while replaying blocks against the ledger to bring it up to --- date w.r.t. the tip of the ImmutableDB during initialisation. 
As this --- process takes a while, we trace events to inform higher layers of our --- progress. -data TraceReplayEvent blk - = -- | There were no LedgerDB snapshots on disk, so we're replaying all blocks - -- starting from Genesis against the initial ledger. - ReplayFromGenesis - (ReplayGoal blk) -- ^ the block at the tip of the ImmutableDB - -- | There was a LedgerDB snapshot on disk corresponding to the given tip. - -- We're replaying more recent blocks against it. - | ReplayFromSnapshot - DiskSnapshot - (ReplayStart blk) -- ^ the block at which this replay started - (ReplayGoal blk) -- ^ the block at the tip of the ImmutableDB - -- | We replayed the given block (reference) on the genesis snapshot during - -- the initialisation of the LedgerDB. Used during ImmutableDB replay. - | ReplayedBlock - (RealPoint blk) -- ^ the block being replayed - [LedgerEvent blk] - (ReplayStart blk) -- ^ the block at which this replay started - (ReplayGoal blk) -- ^ the block at the tip of the ImmutableDB - deriving (Generic, Eq, Show) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/LedgerDB.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/LedgerDB.hs deleted file mode 100644 index 15e2745c26..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/LedgerDB.hs +++ /dev/null @@ -1,134 +0,0 @@ -{-# LANGUAGE DeriveAnyClass #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE StandaloneDeriving #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE UndecidableInstances #-} - -module Ouroboros.Consensus.Storage.LedgerDB.LedgerDB ( - -- * LedgerDB - Checkpoint (..) - , LedgerDB (..) - , LedgerDB' - , LedgerDbCfg (..) - , configLedgerDb - ) where - -import GHC.Generics (Generic) -import NoThunks.Class (NoThunks) -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Config -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ledger.Extended (ExtLedgerCfg (..), - ExtLedgerState) -import Ouroboros.Consensus.Protocol.Abstract (ConsensusProtocol) -import Ouroboros.Network.AnchoredSeq (Anchorable (..), - AnchoredSeq (..)) -import qualified Ouroboros.Network.AnchoredSeq as AS - -{------------------------------------------------------------------------------- - LedgerDB --------------------------------------------------------------------------------} - --- | Internal state of the ledger DB --- --- The ledger DB looks like --- --- > anchor |> snapshots <| current --- --- where @anchor@ records the oldest known snapshot and @current@ the most --- recent. The anchor is the oldest point we can roll back to. --- --- We take a snapshot after each block is applied and keep in memory a window --- of the last @k@ snapshots. We have verified empirically (#1936) that the --- overhead of keeping @k@ snapshots in memory is small, i.e., about 5% --- compared to keeping a snapshot every 100 blocks. This is thanks to sharing --- between consecutive snapshots. --- --- As an example, suppose we have @k = 6@. The ledger DB grows as illustrated --- below, where we indicate the anchor number of blocks, the stored snapshots, --- and the current ledger. 
--- --- > anchor |> # [ snapshots ] <| tip --- > --------------------------------------------------------------------------- --- > G |> (0) [ ] <| G --- > G |> (1) [ L1] <| L1 --- > G |> (2) [ L1, L2] <| L2 --- > G |> (3) [ L1, L2, L3] <| L3 --- > G |> (4) [ L1, L2, L3, L4] <| L4 --- > G |> (5) [ L1, L2, L3, L4, L5] <| L5 --- > G |> (6) [ L1, L2, L3, L4, L5, L6] <| L6 --- > L1 |> (6) [ L2, L3, L4, L5, L6, L7] <| L7 --- > L2 |> (6) [ L3, L4, L5, L6, L7, L8] <| L8 --- > L3 |> (6) [ L4, L5, L6, L7, L8, L9] <| L9 (*) --- > L4 |> (6) [ L5, L6, L7, L8, L9, L10] <| L10 --- > L5 |> (6) [*L6, L7, L8, L9, L10, L11] <| L11 --- > L6 |> (6) [ L7, L8, L9, L10, L11, L12] <| L12 --- > L7 |> (6) [ L8, L9, L10, L12, L12, L13] <| L13 --- > L8 |> (6) [ L9, L10, L12, L12, L13, L14] <| L14 --- --- The ledger DB must guarantee that at all times we are able to roll back @k@ --- blocks. For example, if we are on line (*), and roll back 6 blocks, we get --- --- > L3 |> [] -newtype LedgerDB l = LedgerDB { - -- | Ledger states - ledgerDbCheckpoints :: AnchoredSeq - (WithOrigin SlotNo) - (Checkpoint l) - (Checkpoint l) - } - deriving (Generic) - -type LedgerDB' blk = LedgerDB (ExtLedgerState blk) - -deriving instance Show l => Show (LedgerDB l) -deriving instance Eq l => Eq (LedgerDB l) -deriving instance NoThunks l => NoThunks (LedgerDB l) - -type instance HeaderHash (LedgerDB l) = HeaderHash l - -instance IsLedger l => GetTip (LedgerDB l) where - getTip = castPoint - . getTip - . either unCheckpoint unCheckpoint - . AS.head - . ledgerDbCheckpoints - --- | Internal newtype wrapper around a ledger state @l@ so that we can define a --- non-blanket 'Anchorable' instance. -newtype Checkpoint l = Checkpoint { - unCheckpoint :: l - } - deriving (Generic) - -deriving instance Show l => Show (Checkpoint l) -deriving instance Eq l => Eq (Checkpoint l) -deriving instance NoThunks l => NoThunks (Checkpoint l) - -instance GetTip l => Anchorable (WithOrigin SlotNo) (Checkpoint l) (Checkpoint l) where - asAnchor = id - getAnchorMeasure _ = getTipSlot . 
unCheckpoint - -{------------------------------------------------------------------------------- - LedgerDB Config --------------------------------------------------------------------------------} - -data LedgerDbCfg l = LedgerDbCfg { - ledgerDbCfgSecParam :: !SecurityParam - , ledgerDbCfg :: !(LedgerCfg l) - } - deriving (Generic) - -deriving instance NoThunks (LedgerCfg l) => NoThunks (LedgerDbCfg l) - -configLedgerDb :: - ConsensusProtocol (BlockProtocol blk) - => TopLevelConfig blk - -> LedgerDbCfg (ExtLedgerState blk) -configLedgerDb cfg = LedgerDbCfg { - ledgerDbCfgSecParam = configSecurityParam cfg - , ledgerDbCfg = ExtLedgerCfg cfg - } diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Query.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Query.hs deleted file mode 100644 index aaa4e20f2c..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Query.hs +++ /dev/null @@ -1,81 +0,0 @@ -{-# LANGUAGE ConstraintKinds #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE GADTs #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE QuantifiedConstraints #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE TypeOperators #-} -{-# LANGUAGE UndecidableInstances #-} - -module Ouroboros.Consensus.Storage.LedgerDB.Query ( - ledgerDbAnchor - , ledgerDbCurrent - , ledgerDbIsSaturated - , ledgerDbMaxRollback - , ledgerDbPast - , ledgerDbSnapshots - , ledgerDbTip - ) where - -import Data.Foldable (find) -import Data.Word -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Config -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Storage.LedgerDB.LedgerDB -import qualified Ouroboros.Network.AnchoredSeq as AS - --- | The ledger state at the tip of the chain -ledgerDbCurrent :: GetTip l => LedgerDB l -> l -ledgerDbCurrent = either unCheckpoint unCheckpoint . AS.head . ledgerDbCheckpoints - --- | Information about the state of the ledger at the anchor -ledgerDbAnchor :: LedgerDB l -> l -ledgerDbAnchor = unCheckpoint . AS.anchor . ledgerDbCheckpoints - --- | All snapshots currently stored by the ledger DB (new to old) --- --- This also includes the snapshot at the anchor. For each snapshot we also --- return the distance from the tip. -ledgerDbSnapshots :: LedgerDB l -> [(Word64, l)] -ledgerDbSnapshots LedgerDB{..} = - zip - [0..] - (map unCheckpoint (AS.toNewestFirst ledgerDbCheckpoints) - <> [unCheckpoint (AS.anchor ledgerDbCheckpoints)]) - --- | How many blocks can we currently roll back? -ledgerDbMaxRollback :: GetTip l => LedgerDB l -> Word64 -ledgerDbMaxRollback LedgerDB{..} = fromIntegral (AS.length ledgerDbCheckpoints) - --- | Reference to the block at the tip of the chain -ledgerDbTip :: GetTip l => LedgerDB l -> Point l -ledgerDbTip = castPoint . getTip . ledgerDbCurrent - --- | Have we seen at least @k@ blocks? -ledgerDbIsSaturated :: GetTip l => SecurityParam -> LedgerDB l -> Bool -ledgerDbIsSaturated (SecurityParam k) db = - ledgerDbMaxRollback db >= k - --- | Get a past ledger state --- --- \( O(\log(\min(i,n-i)) \) --- --- When no ledger state (or anchor) has the given 'Point', 'Nothing' is --- returned. 
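(Editorial illustration, not part of the original patch: a sketch of how the query API above behaves, assuming a hypothetical @db@ anchored at @G@ with checkpoints @L1 .. L6@ as in the 'LedgerDB' diagram earlier; the bindings @g@, @l3@, @l6@ and @unknownPt@ are hypothetical names for the corresponding ledger states and an arbitrary unrelated point.)
-- ledgerDbCurrent db                       == l6       -- the most recent checkpoint, i.e. the tip
-- ledgerDbMaxRollback db                   == 6        -- one rollback per stored checkpoint
-- ledgerDbPast (castPoint (getTip l3)) db  == Just l3  -- a state still inside the window
-- ledgerDbPast (castPoint (getTip g))  db  == Just g   -- the anchor itself is also reachable
-- ledgerDbPast unknownPt               db  == Nothing  -- neither the anchor nor a checkpoint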
-ledgerDbPast :: - (HasHeader blk, IsLedger l, HeaderHash l ~ HeaderHash blk) - => Point blk - -> LedgerDB l - -> Maybe l -ledgerDbPast pt db - | pt == castPoint (getTip (ledgerDbAnchor db)) - = Just $ ledgerDbAnchor db - | otherwise - = fmap unCheckpoint $ - find ((== pt) . castPoint . getTip . unCheckpoint) $ - AS.lookupByMeasure (pointSlot pt) (ledgerDbCheckpoints db) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Snapshots.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Snapshots.hs deleted file mode 100644 index 970cac6abd..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Snapshots.hs +++ /dev/null @@ -1,297 +0,0 @@ -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE KindSignatures #-} -{-# LANGUAGE MultiParamTypeClasses #-} -{-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TypeApplications #-} - -module Ouroboros.Consensus.Storage.LedgerDB.Snapshots ( - DiskSnapshot (..) - -- * Read from disk - , SnapshotFailure (..) - , diskSnapshotIsTemporary - , listSnapshots - , readSnapshot - -- * Write to disk - , takeSnapshot - , trimSnapshots - , writeSnapshot - -- * Low-level API (primarily exposed for testing) - , decodeSnapshotBackwardsCompatible - , deleteSnapshot - , encodeSnapshot - , snapshotToFileName - , snapshotToPath - -- * Trace - , TraceSnapshotEvent (..) - ) where - -import qualified Codec.CBOR.Write as CBOR -import Codec.Serialise.Decoding (Decoder) -import qualified Codec.Serialise.Decoding as Dec -import Codec.Serialise.Encoding (Encoding) -import Control.Monad (forM, void) -import Control.Monad.Except (ExceptT (..)) -import Control.Tracer -import Data.Functor.Contravariant ((>$<)) -import qualified Data.List as List -import Data.Maybe (isJust, mapMaybe) -import Data.Ord (Down (..)) -import Data.Set (Set) -import qualified Data.Set as Set -import Data.Word -import GHC.Generics (Generic) -import GHC.Stack -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy -import Ouroboros.Consensus.Util.CBOR (ReadIncrementalErr, - decodeWithOrigin, readIncremental) -import Ouroboros.Consensus.Util.Enclose -import Ouroboros.Consensus.Util.IOLike -import Ouroboros.Consensus.Util.Versioned -import System.FS.API.Lazy -import Text.Read (readMaybe) - -{------------------------------------------------------------------------------- - Write to disk --------------------------------------------------------------------------------} - -data SnapshotFailure blk = - -- | We failed to deserialise the snapshot - -- - -- This can happen due to data corruption in the ledger DB. - InitFailureRead ReadIncrementalErr - - -- | This snapshot is too recent (ahead of the tip of the chain) - | InitFailureTooRecent (RealPoint blk) - - -- | This snapshot was of the ledger state at genesis, even though we never - -- take snapshots at genesis, so this is unexpected. - | InitFailureGenesis - deriving (Show, Eq, Generic) - -data TraceSnapshotEvent blk - = InvalidSnapshot DiskSnapshot (SnapshotFailure blk) - -- ^ An on disk snapshot was skipped because it was invalid. - | TookSnapshot DiskSnapshot (RealPoint blk) EnclosingTimed - -- ^ A snapshot was written to disk. 
- | DeletedSnapshot DiskSnapshot - -- ^ An old or invalid on-disk snapshot was deleted - deriving (Generic, Eq, Show) - --- | Take a snapshot of the /oldest ledger state/ in the ledger DB --- --- We write the /oldest/ ledger state to disk because the intention is to only --- write ledger states to disk that we know to be immutable. Primarily for --- testing purposes, 'takeSnapshot' returns the block reference corresponding --- to the snapshot that we wrote. --- --- If a snapshot with the same number already exists on disk or if the tip is at --- genesis, no snapshot is taken. --- --- Note that an EBB can have the same slot number and thus snapshot number as --- the block after it. This doesn't matter. The one block difference in the --- ledger state doesn't warrant an additional snapshot. The number in the name --- of the snapshot is only indicative, we don't rely on it being correct. --- --- NOTE: This is a lower-level API that takes a snapshot independent from --- whether this snapshot corresponds to a state that is more than @k@ back. --- --- TODO: Should we delete the file if an error occurs during writing? -takeSnapshot :: - forall m blk. (MonadThrow m, MonadMonotonicTime m, IsLedger (LedgerState blk)) - => Tracer m (TraceSnapshotEvent blk) - -> SomeHasFS m - -> (ExtLedgerState blk -> Encoding) - -> ExtLedgerState blk -> m (Maybe (DiskSnapshot, RealPoint blk)) -takeSnapshot tracer hasFS encLedger oldest = - case pointToWithOriginRealPoint (castPoint (getTip oldest)) of - Origin -> - return Nothing - NotOrigin tip -> do - let number = unSlotNo (realPointSlot tip) - snapshot = DiskSnapshot number Nothing - snapshots <- listSnapshots hasFS - if List.any ((== number) . dsNumber) snapshots then - return Nothing - else do - encloseTimedWith (TookSnapshot snapshot tip >$< tracer) - $ writeSnapshot hasFS encLedger snapshot oldest - return $ Just (snapshot, tip) - --- | Trim the number of on disk snapshots so that at most 'onDiskNumSnapshots' --- snapshots are stored on disk. The oldest snapshots are deleted. --- --- The deleted snapshots are returned. -trimSnapshots :: - Monad m - => Tracer m (TraceSnapshotEvent r) - -> SomeHasFS m - -> DiskPolicy - -> m [DiskSnapshot] -trimSnapshots tracer hasFS DiskPolicy{..} = do - -- We only trim temporary snapshots - snapshots <- filter diskSnapshotIsTemporary <$> listSnapshots hasFS - -- The snapshot are most recent first, so we can simply drop from the - -- front to get the snapshots that are "too" old. - forM (drop (fromIntegral onDiskNumSnapshots) snapshots) $ \snapshot -> do - deleteSnapshot hasFS snapshot - traceWith tracer $ DeletedSnapshot snapshot - return snapshot - -{------------------------------------------------------------------------------- - Internal: reading from disk --------------------------------------------------------------------------------} - -data DiskSnapshot = DiskSnapshot { - -- | Snapshots are numbered. We will try the snapshots with the highest - -- number first. - -- - -- When creating a snapshot, we use the slot number of the ledger state it - -- corresponds to as the snapshot number. This gives an indication of how - -- recent the snapshot is. - -- - -- Note that the snapshot names are only indicative, we don't rely on the - -- snapshot number matching the slot number of the corresponding ledger - -- state. We only use the snapshots numbers to determine the order in - -- which we try them. 
- dsNumber :: Word64 - - -- | Snapshots can optionally have a suffix, separated by the snapshot - -- number with an underscore, e.g., @4492799_last_Byron@. This suffix acts - -- as metadata for the operator of the node. Snapshots with a suffix will - -- /not be trimmed/. - , dsSuffix :: Maybe String - } - deriving (Show, Eq, Ord, Generic) - --- | Named snapshot are permanent, they will never be deleted when trimming. -diskSnapshotIsPermanent :: DiskSnapshot -> Bool -diskSnapshotIsPermanent = isJust . dsSuffix - --- | The snapshots that are periodically created are temporary, they will be --- deleted when trimming -diskSnapshotIsTemporary :: DiskSnapshot -> Bool -diskSnapshotIsTemporary = not . diskSnapshotIsPermanent - --- | Read snapshot from disk -readSnapshot :: - forall m blk. IOLike m - => SomeHasFS m - -> (forall s. Decoder s (ExtLedgerState blk)) - -> (forall s. Decoder s (HeaderHash blk)) - -> DiskSnapshot - -> ExceptT ReadIncrementalErr m (ExtLedgerState blk) -readSnapshot hasFS decLedger decHash = - ExceptT - . readIncremental hasFS decoder - . snapshotToPath - where - decoder :: Decoder s (ExtLedgerState blk) - decoder = decodeSnapshotBackwardsCompatible (Proxy @blk) decLedger decHash - --- | Write snapshot to disk -writeSnapshot :: - forall m blk. MonadThrow m - => SomeHasFS m - -> (ExtLedgerState blk -> Encoding) - -> DiskSnapshot - -> ExtLedgerState blk -> m () -writeSnapshot (SomeHasFS hasFS) encLedger ss cs = do - withFile hasFS (snapshotToPath ss) (WriteMode MustBeNew) $ \h -> - void $ hPut hasFS h $ CBOR.toBuilder (encode cs) - where - encode :: ExtLedgerState blk -> Encoding - encode = encodeSnapshot encLedger - --- | Delete snapshot from disk -deleteSnapshot :: HasCallStack => SomeHasFS m -> DiskSnapshot -> m () -deleteSnapshot (SomeHasFS HasFS{..}) = removeFile . snapshotToPath - --- | List on-disk snapshots, highest number first. -listSnapshots :: Monad m => SomeHasFS m -> m [DiskSnapshot] -listSnapshots (SomeHasFS HasFS{..}) = - aux <$> listDirectory (mkFsPath []) - where - aux :: Set String -> [DiskSnapshot] - aux = List.sortOn (Down . dsNumber) . mapMaybe snapshotFromPath . Set.toList - -snapshotToFileName :: DiskSnapshot -> String -snapshotToFileName DiskSnapshot { dsNumber, dsSuffix } = - show dsNumber <> suffix - where - suffix = case dsSuffix of - Nothing -> "" - Just s -> "_" <> s - -snapshotToPath :: DiskSnapshot -> FsPath -snapshotToPath = mkFsPath . (:[]) . snapshotToFileName - -snapshotFromPath :: String -> Maybe DiskSnapshot -snapshotFromPath fileName = do - number <- readMaybe prefix - return $ DiskSnapshot number suffix' - where - (prefix, suffix) = break (== '_') fileName - - suffix' :: Maybe String - suffix' = case suffix of - "" -> Nothing - _ : str -> Just str - -{------------------------------------------------------------------------------- - Serialisation --------------------------------------------------------------------------------} - --- | Version 1: uses versioning ('Ouroboros.Consensus.Util.Versioned') and only --- encodes the ledger state @l@. -snapshotEncodingVersion1 :: VersionNumber -snapshotEncodingVersion1 = 1 - --- | Encoder to be used in combination with 'decodeSnapshotBackwardsCompatible'. -encodeSnapshot :: (l -> Encoding) -> l -> Encoding -encodeSnapshot encodeLedger l = - encodeVersion snapshotEncodingVersion1 (encodeLedger l) - --- | To remain backwards compatible with existing snapshots stored on disk, we --- must accept the old format as well as the new format. 
--- --- The old format: --- * The tip: @WithOrigin (RealPoint blk)@ --- * The chain length: @Word64@ --- * The ledger state: @l@ --- --- The new format is described by 'snapshotEncodingVersion1'. --- --- This decoder will accept and ignore them. The encoder ('encodeSnapshot') will --- no longer encode them. -decodeSnapshotBackwardsCompatible :: - forall l blk. - Proxy blk - -> (forall s. Decoder s l) - -> (forall s. Decoder s (HeaderHash blk)) - -> forall s. Decoder s l -decodeSnapshotBackwardsCompatible _ decodeLedger decodeHash = - decodeVersionWithHook - decodeOldFormat - [(snapshotEncodingVersion1, Decode decodeVersion1)] - where - decodeVersion1 :: forall s. Decoder s l - decodeVersion1 = decodeLedger - - decodeOldFormat :: Maybe Int -> forall s. Decoder s l - decodeOldFormat (Just 3) = do - _ <- withOriginRealPointToPoint <$> - decodeWithOrigin (decodeRealPoint @blk decodeHash) - _ <- Dec.decodeWord64 - decodeLedger - decodeOldFormat mbListLen = - fail $ - "decodeSnapshotBackwardsCompatible: invalid start " <> - show mbListLen diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Update.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Update.hs deleted file mode 100644 index d79bd72c4a..0000000000 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/Update.hs +++ /dev/null @@ -1,386 +0,0 @@ -{-# LANGUAGE ConstraintKinds #-} -{-# LANGUAGE DeriveGeneric #-} -{-# LANGUAGE FlexibleContexts #-} -{-# LANGUAGE FlexibleInstances #-} -{-# LANGUAGE FunctionalDependencies #-} -{-# LANGUAGE GADTs #-} -{-# LANGUAGE QuantifiedConstraints #-} -{-# LANGUAGE RankNTypes #-} -{-# LANGUAGE RecordWildCards #-} -{-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE StandaloneKindSignatures #-} -{-# LANGUAGE TypeFamilies #-} -{-# LANGUAGE UndecidableInstances #-} - --- | Accessors for the LedgerDB and management --- --- This module defines the operations that can be done on a LedgerDB, as well as --- the procedures to apply a block to a LedgerDB and pushing the resulting --- LedgerState into the DB. -module Ouroboros.Consensus.Storage.LedgerDB.Update ( - -- * LedgerDB management - ledgerDbWithAnchor - -- * Applying blocks - , AnnLedgerError (..) - , AnnLedgerError' - , Ap (..) - , ExceededRollback (..) - , ThrowsLedgerError (..) - , defaultThrowLedgerErrors - -- * Block resolution - , ResolveBlock - , ResolvesBlocks (..) - , defaultResolveBlocks - -- * Updates - , defaultResolveWithErrors - , ledgerDbBimap - , ledgerDbPrune - , ledgerDbPush - , ledgerDbSwitch - -- * Pure API - , ledgerDbPush' - , ledgerDbPushMany' - , ledgerDbSwitch' - -- * Trace - , PushGoal (..) - , PushStart (..) - , Pushing (..) - , UpdateLedgerDbTraceEvent (..) 
- ) where - -import Control.Monad.Except (ExceptT, runExcept, runExceptT, - throwError) -import Control.Monad.Reader (ReaderT (..), runReaderT) -import Control.Monad.Trans.Class (lift) -import Data.Functor.Identity -import Data.Kind (Constraint, Type) -import Data.Word -import GHC.Generics -import Ouroboros.Consensus.Block -import Ouroboros.Consensus.Config -import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Ledger.Extended -import Ouroboros.Consensus.Storage.LedgerDB.LedgerDB -import Ouroboros.Consensus.Storage.LedgerDB.Query -import Ouroboros.Consensus.Util -import Ouroboros.Network.AnchoredSeq (Anchorable (..), - AnchoredSeq (..)) -import qualified Ouroboros.Network.AnchoredSeq as AS - -{------------------------------------------------------------------------------- - Apply blocks --------------------------------------------------------------------------------} - --- | 'Ap' is used to pass information about blocks to ledger DB updates --- --- The constructors serve two purposes: --- --- * Specify the various parameters --- a. Are we passing the block by value or by reference? --- b. Are we applying or reapplying the block? --- --- * Compute the constraint @c@ on the monad @m@ in order to run the query: --- a. If we are passing a block by reference, we must be able to resolve it. --- b. If we are applying rather than reapplying, we might have ledger errors. -type Ap :: (Type -> Type) -> Type -> Type -> Constraint -> Type -data Ap m l blk c where - ReapplyVal :: blk -> Ap m l blk () - ApplyVal :: blk -> Ap m l blk ( ThrowsLedgerError m l blk) - ReapplyRef :: RealPoint blk -> Ap m l blk (ResolvesBlocks m blk) - ApplyRef :: RealPoint blk -> Ap m l blk (ResolvesBlocks m blk, ThrowsLedgerError m l blk) - - -- | 'Weaken' increases the constraint on the monad @m@. - -- - -- This is primarily useful when combining multiple 'Ap's in a single - -- homogeneous structure. - Weaken :: (c' => c) => Ap m l blk c -> Ap m l blk c' - -{------------------------------------------------------------------------------- - Internal utilities for 'Ap' --------------------------------------------------------------------------------} - -toRealPoint :: HasHeader blk => Ap m l blk c -> RealPoint blk -toRealPoint (ReapplyVal blk) = blockRealPoint blk -toRealPoint (ApplyVal blk) = blockRealPoint blk -toRealPoint (ReapplyRef rp) = rp -toRealPoint (ApplyRef rp) = rp -toRealPoint (Weaken ap) = toRealPoint ap - --- | Apply block to the current ledger state --- --- We take in the entire 'LedgerDB' because we record that as part of errors. -applyBlock :: forall m c l blk. 
(ApplyBlock l blk, Monad m, c) - => LedgerCfg l - -> Ap m l blk c - -> LedgerDB l -> m l -applyBlock cfg ap db = case ap of - ReapplyVal b -> - return $ - tickThenReapply cfg b l - ApplyVal b -> - either (throwLedgerError db (blockRealPoint b)) return $ runExcept $ - tickThenApply cfg b l - ReapplyRef r -> do - b <- doResolveBlock r - return $ - tickThenReapply cfg b l - ApplyRef r -> do - b <- doResolveBlock r - either (throwLedgerError db r) return $ runExcept $ - tickThenApply cfg b l - Weaken ap' -> - applyBlock cfg ap' db - where - l :: l - l = ledgerDbCurrent db - -{------------------------------------------------------------------------------- - Resolving blocks maybe from disk --------------------------------------------------------------------------------} - --- | Resolve a block --- --- Resolving a block reference to the actual block lives in @m@ because --- it might need to read the block from disk (and can therefore not be --- done inside an STM transaction). --- --- NOTE: The ledger DB will only ask the 'ChainDB' for blocks it knows --- must exist. If the 'ChainDB' is unable to fulfill the request, data --- corruption must have happened and the 'ChainDB' should trigger --- validation mode. -type ResolveBlock m blk = RealPoint blk -> m blk - --- | Monads in which we can resolve blocks --- --- To guide type inference, we insist that we must be able to infer the type --- of the block we are resolving from the type of the monad. -class Monad m => ResolvesBlocks m blk | m -> blk where - doResolveBlock :: ResolveBlock m blk - -instance Monad m => ResolvesBlocks (ReaderT (ResolveBlock m blk) m) blk where - doResolveBlock r = ReaderT $ \f -> f r - -defaultResolveBlocks :: ResolveBlock m blk - -> ReaderT (ResolveBlock m blk) m a - -> m a -defaultResolveBlocks = flip runReaderT - --- Quite a specific instance so we can satisfy the fundep -instance Monad m - => ResolvesBlocks (ExceptT e (ReaderT (ResolveBlock m blk) m)) blk where - doResolveBlock = lift . doResolveBlock - -{------------------------------------------------------------------------------- - A ledger error annotated with the LedgerDB --------------------------------------------------------------------------------} - --- | Annotated ledger errors -data AnnLedgerError l blk = AnnLedgerError { - -- | The ledger DB just /before/ this block was applied - annLedgerState :: LedgerDB l - - -- | Reference to the block that had the error - , annLedgerErrRef :: RealPoint blk - - -- | The ledger error itself - , annLedgerErr :: LedgerErr l - } - -type AnnLedgerError' blk = AnnLedgerError (ExtLedgerState blk) blk - -class Monad m => ThrowsLedgerError m l blk where - throwLedgerError :: LedgerDB l -> RealPoint blk -> LedgerErr l -> m a - -instance Monad m => ThrowsLedgerError (ExceptT (AnnLedgerError l blk) m) l blk where - throwLedgerError l r e = throwError $ AnnLedgerError l r e - -defaultThrowLedgerErrors :: ExceptT (AnnLedgerError l blk) m a - -> m (Either (AnnLedgerError l blk) a) -defaultThrowLedgerErrors = runExceptT - -defaultResolveWithErrors :: ResolveBlock m blk - -> ExceptT (AnnLedgerError l blk) - (ReaderT (ResolveBlock m blk) m) - a - -> m (Either (AnnLedgerError l blk) a) -defaultResolveWithErrors resolve = - defaultResolveBlocks resolve - . 
defaultThrowLedgerErrors - -{------------------------------------------------------------------------------- - LedgerDB management --------------------------------------------------------------------------------} - --- | Ledger DB starting at the specified ledger state -ledgerDbWithAnchor :: GetTip l => l -> LedgerDB l -ledgerDbWithAnchor anchor = LedgerDB { - ledgerDbCheckpoints = Empty (Checkpoint anchor) - } - --- | Transform the underlying 'AnchoredSeq' using the given functions. -ledgerDbBimap :: - Anchorable (WithOrigin SlotNo) a b - => (l -> a) - -> (l -> b) - -> LedgerDB l - -> AnchoredSeq (WithOrigin SlotNo) a b -ledgerDbBimap f g = - -- Instead of exposing 'ledgerDbCheckpoints' directly, this function hides - -- the internal 'Checkpoint' type. - AS.bimap (f . unCheckpoint) (g . unCheckpoint) . ledgerDbCheckpoints - --- | Prune snapshots until at we have at most @k@ snapshots in the LedgerDB, --- excluding the snapshots stored at the anchor. -ledgerDbPrune :: GetTip l => SecurityParam -> LedgerDB l -> LedgerDB l -ledgerDbPrune (SecurityParam k) db = db { - ledgerDbCheckpoints = AS.anchorNewest k (ledgerDbCheckpoints db) - } - - -- NOTE: we must inline 'ledgerDbPrune' otherwise we get unexplained thunks in - -- 'LedgerDB' and thus a space leak. Alternatively, we could disable the - -- @-fstrictness@ optimisation (enabled by default for -O1). See #2532. -{-# INLINE ledgerDbPrune #-} - -{------------------------------------------------------------------------------- - Internal updates --------------------------------------------------------------------------------} - --- | Push an updated ledger state -pushLedgerState :: - GetTip l - => SecurityParam - -> l -- ^ Updated ledger state - -> LedgerDB l -> LedgerDB l -pushLedgerState secParam current' db@LedgerDB{..} = - ledgerDbPrune secParam $ db { - ledgerDbCheckpoints = ledgerDbCheckpoints AS.:> Checkpoint current' - } - -{------------------------------------------------------------------------------- - Internal: rolling back --------------------------------------------------------------------------------} - --- | Rollback --- --- Returns 'Nothing' if maximum rollback is exceeded. -rollback :: GetTip l => Word64 -> LedgerDB l -> Maybe (LedgerDB l) -rollback n db@LedgerDB{..} - | n <= ledgerDbMaxRollback db - = Just db { - ledgerDbCheckpoints = AS.dropNewest (fromIntegral n) ledgerDbCheckpoints - } - | otherwise - = Nothing - -{------------------------------------------------------------------------------- - Updates --------------------------------------------------------------------------------} - --- | Exceeded maximum rollback supported by the current ledger DB state --- --- Under normal circumstances this will not arise. It can really only happen --- in the presence of data corruption (or when switching to a shorter fork, --- but that is disallowed by all currently known Ouroboros protocols). --- --- Records both the supported and the requested rollback. -data ExceededRollback = ExceededRollback { - rollbackMaximum :: Word64 - , rollbackRequested :: Word64 - } - -ledgerDbPush :: forall m c l blk. (ApplyBlock l blk, Monad m, c) - => LedgerDbCfg l - -> Ap m l blk c -> LedgerDB l -> m (LedgerDB l) -ledgerDbPush cfg ap db = - (\current' -> pushLedgerState (ledgerDbCfgSecParam cfg) current' db) <$> - applyBlock (ledgerDbCfg cfg) ap db - --- | Push a bunch of blocks (oldest first) -ledgerDbPushMany :: - forall m c l blk . 
(ApplyBlock l blk, Monad m, c) - => (Pushing blk -> m ()) - -> LedgerDbCfg l - -> [Ap m l blk c] -> LedgerDB l -> m (LedgerDB l) -ledgerDbPushMany trace cfg aps initDb = (repeatedlyM pushAndTrace) aps initDb - where - pushAndTrace ap db = do - let pushing = Pushing . toRealPoint $ ap - trace pushing - ledgerDbPush cfg ap db - --- | Switch to a fork -ledgerDbSwitch :: (ApplyBlock l blk, Monad m, c) - => LedgerDbCfg l - -> Word64 -- ^ How many blocks to roll back - -> (UpdateLedgerDbTraceEvent blk -> m ()) - -> [Ap m l blk c] -- ^ New blocks to apply - -> LedgerDB l - -> m (Either ExceededRollback (LedgerDB l)) -ledgerDbSwitch cfg numRollbacks trace newBlocks db = - case rollback numRollbacks db of - Nothing -> - return $ Left $ ExceededRollback { - rollbackMaximum = ledgerDbMaxRollback db - , rollbackRequested = numRollbacks - } - Just db' -> case newBlocks of - [] -> pure $ Right db' - -- no blocks to apply to ledger state, return current LedgerDB - (firstBlock:_) -> do - let start = PushStart . toRealPoint $ firstBlock - goal = PushGoal . toRealPoint . last $ newBlocks - Right <$> ledgerDbPushMany (trace . (StartedPushingBlockToTheLedgerDb start goal)) - cfg - newBlocks - db' - -{------------------------------------------------------------------------------- - Trace events --------------------------------------------------------------------------------} - -newtype PushStart blk = PushStart { unPushStart :: RealPoint blk } - deriving (Show, Eq) - -newtype PushGoal blk = PushGoal { unPushGoal :: RealPoint blk } - deriving (Show, Eq) - -newtype Pushing blk = Pushing { unPushing :: RealPoint blk } - deriving (Show, Eq) - -data UpdateLedgerDbTraceEvent blk = - -- | Event fired when we are about to push a block to the LedgerDB - StartedPushingBlockToTheLedgerDb - !(PushStart blk) - -- ^ Point from which we started pushing new blocks - (PushGoal blk) - -- ^ Point to which we are updating the ledger, the last event - -- StartedPushingBlockToTheLedgerDb will have Pushing and PushGoal - -- wrapping over the same RealPoint - !(Pushing blk) - -- ^ Point which block we are about to push - deriving (Show, Eq, Generic) - -{------------------------------------------------------------------------------- - Support for testing --------------------------------------------------------------------------------} - -pureBlock :: blk -> Ap m l blk () -pureBlock = ReapplyVal - -ledgerDbPush' :: ApplyBlock l blk - => LedgerDbCfg l -> blk -> LedgerDB l -> LedgerDB l -ledgerDbPush' cfg b = runIdentity . ledgerDbPush cfg (pureBlock b) - -ledgerDbPushMany' :: ApplyBlock l blk - => LedgerDbCfg l -> [blk] -> LedgerDB l -> LedgerDB l -ledgerDbPushMany' cfg bs = - runIdentity . ledgerDbPushMany (const $ pure ()) cfg (map pureBlock bs) - -ledgerDbSwitch' :: forall l blk. 
ApplyBlock l blk - => LedgerDbCfg l - -> Word64 -> [blk] -> LedgerDB l -> Maybe (LedgerDB l) -ledgerDbSwitch' cfg n bs db = - case runIdentity $ ledgerDbSwitch cfg n (const $ pure ()) (map pureBlock bs) db of - Left ExceededRollback{} -> Nothing - Right db' -> Just db' - diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Args.hs new file mode 100644 index 0000000000..0f8d19717f --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Args.hs @@ -0,0 +1,100 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE TypeFamilies #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Args ( + BackingStoreArgs (..) + , FlushFrequency (..) + , LedgerDbFlavorArgs (..) + , QueryBatchSize (..) + , defaultLedgerDbFlavorArgs + , defaultQueryBatchSize + , defaultShouldFlush + ) where + +import Control.Monad.IO.Class +import qualified Data.SOP.Dict as Dict +import Data.Word +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB +import Ouroboros.Consensus.Util.Args + +{------------------------------------------------------------------------------- + Arguments +-------------------------------------------------------------------------------} + +-- | The /maximum/ number of keys to read in a backing store range query. +-- +-- When performing a ledger state query that involves on-disk parts of the +-- ledger state, we might have to read ranges of key-value pair data (e.g., +-- UTxO) from disk using backing store range queries. Instead of reading all +-- data in one go, we read it in batches. 'QueryBatchSize' determines the size +-- of these batches. +-- +-- INVARIANT: Should be at least 1. +-- +-- It is fine if the result of a range read contains less than this number of +-- keys, but it should never return more. +data QueryBatchSize = + -- | A default value, which is determined by a specific 'DiskPolicy'. See + -- 'defaultDiskPolicy' as an example. + DefaultQueryBatchSize + -- | A requested value: the number of keys to read from disk in each batch. + | RequestedQueryBatchSize Word64 + + -- | To disable queries, to be used in tests + | DisableQuerySize + deriving (Show, Eq, Generic) + deriving anyclass NoThunks + +defaultQueryBatchSize :: QueryBatchSize -> Word64 +defaultQueryBatchSize requestedQueryBatchSize = case requestedQueryBatchSize of + RequestedQueryBatchSize value -> value + DefaultQueryBatchSize -> 100_000 + DisableQuerySize -> 0 + +-- | The number of diffs in the immutable part of the chain that we have to see +-- before we flush the ledger state to disk. See 'onDiskShouldFlush'. +-- +-- INVARIANT: Should be at least 0. +data FlushFrequency = + -- | A default value, which is determined by a specific 'SnapshotPolicy'. See + -- 'defaultSnapshotPolicy' as an example. + DefaultFlushFrequency + -- | A requested value: the number of diffs in the immutable part of the + -- chain required before flushing. 
+ | RequestedFlushFrequency Word64 + -- | To disable flushing, to be used in tests + | DisableFlushing + deriving (Show, Eq, Generic) + +defaultShouldFlush :: FlushFrequency -> (Word64 -> Bool) +defaultShouldFlush requestedFlushFrequency = case requestedFlushFrequency of + RequestedFlushFrequency value -> (>= value) + DefaultFlushFrequency -> (>= 100) + DisableFlushing -> const False + +data LedgerDbFlavorArgs f m = V1Args { + v1FlushFrequency :: FlushFrequency + , v1QueryBatchSize :: QueryBatchSize + , v1BackendArgs :: BackingStoreArgs f m + } + +data BackingStoreArgs f m = + LMDBBackingStoreArgs (LiveLMDBFS m) (HKD f LMDBLimits) (Dict.Dict MonadIO m) + | InMemoryBackingStoreArgs + +defaultLedgerDbFlavorArgs :: Incomplete LedgerDbFlavorArgs m +defaultLedgerDbFlavorArgs = V1Args DefaultFlushFrequency DefaultQueryBatchSize defaultBackingStoreArgs + +defaultBackingStoreArgs :: Incomplete BackingStoreArgs m +defaultBackingStoreArgs = InMemoryBackingStoreArgs diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore.hs new file mode 100644 index 0000000000..f7bea5f2e1 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore.hs @@ -0,0 +1,121 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} + +-- | See "Ouroboros.Consensus.Storage.LedgerDB.BackingStore.API" for the +-- documentation. This module just puts together the implementations for the +-- API, currently two: +-- +-- * "Ouroboros.Consensus.Storage.LedgerDB.BackingStore.Impl.InMemory": a @TVar@ +-- holding a "Data.Map". +-- +-- * "Ouroboros.Consensus.Storage.LedgerDB.BackingStore.Impl.LMDB": an external +-- disk-based database. +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore ( + -- * API + -- + -- | Most of the documentation on the behaviour of the 'BackingStore' lives + -- in this module. + module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API + -- * Initialization + , newBackingStore + , restoreBackingStore + -- * Tracing + , FlavorImplSpecificTrace (..) + , FlavorImplSpecificTraceInMemory (..) + , FlavorImplSpecificTraceOnDisk (..) 
+ -- * Testing + , newBackingStoreInitialiser + ) where + +import Cardano.Slotting.Slot +import Control.Tracer +import Data.Functor.Contravariant +import Data.SOP.Dict (Dict (..)) +import GHC.Stack (HasCallStack) +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.InMemory as InMemory +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB as LMDB +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.IOLike +import System.FS.API + +type BackingStoreInitialiser m l = + InitFrom (LedgerTables l ValuesMK) + -> m (LedgerBackingStore m l) + +-- | Overwrite the 'BackingStore' tables with the snapshot's tables +restoreBackingStore :: + ( IOLike m + , HasLedgerTables l + , CanSerializeLedgerTables l + , HasCallStack + ) + => Tracer m FlavorImplSpecificTrace + -> Complete BackingStoreArgs m + -> SnapshotsFS m + -> FsPath + -> m (LedgerBackingStore m l) +restoreBackingStore trcr bss fs loadPath = + newBackingStoreInitialiser trcr bss fs (InitFromCopy loadPath) + +-- | Create a 'BackingStore' from the given initial tables. +newBackingStore :: + ( IOLike m + , HasLedgerTables l + , CanSerializeLedgerTables l + , HasCallStack + ) + => Tracer m FlavorImplSpecificTrace + -> Complete BackingStoreArgs m + -> SnapshotsFS m + -> LedgerTables l ValuesMK + -> m (LedgerBackingStore m l) +newBackingStore trcr bss fs tables = + newBackingStoreInitialiser trcr bss fs (InitFromValues Origin tables) + +newBackingStoreInitialiser :: + forall m l. + ( IOLike m + , HasLedgerTables l + , CanSerializeLedgerTables l + , HasCallStack + ) + => Tracer m FlavorImplSpecificTrace + -> Complete BackingStoreArgs m + -> SnapshotsFS m + -> BackingStoreInitialiser m l +newBackingStoreInitialiser trcr bss = + case bss of + LMDBBackingStoreArgs fs limits Dict -> + LMDB.newLMDBBackingStore + (FlavorImplSpecificTraceOnDisk . OnDiskBackingStoreTrace >$< trcr) + limits + fs + InMemoryBackingStoreArgs -> + InMemory.newInMemoryBackingStore + (FlavorImplSpecificTraceInMemory . 
InMemoryBackingStoreTrace >$< trcr) + +{------------------------------------------------------------------------------- + Tracing +-------------------------------------------------------------------------------} + +data FlavorImplSpecificTrace = + FlavorImplSpecificTraceInMemory FlavorImplSpecificTraceInMemory + | FlavorImplSpecificTraceOnDisk FlavorImplSpecificTraceOnDisk + deriving (Eq, Show) + +data FlavorImplSpecificTraceInMemory = + InMemoryBackingStoreInitialise + | InMemoryBackingStoreTrace BackingStoreTrace + deriving (Eq, Show) + +data FlavorImplSpecificTraceOnDisk = + OnDiskBackingStoreInitialise LMDB.LMDBLimits + | OnDiskBackingStoreTrace BackingStoreTrace + deriving (Eq, Show) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/API.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/API.hs new file mode 100644 index 0000000000..8248f33058 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/API.hs @@ -0,0 +1,284 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | The 'BackingStore' is the component of the +-- 'Ouroboros.Consensus.Storage.LedgerDB.LedgerDB' implementation that stores a +-- key-value map with the 'LedgerTable's at a specific slot on the chain. +-- +-- It is used for storing 'Ouroboros.Consensus.Ledger.Basics.LedgerState' data +-- structures, and updating them with t'Data.Map.Diff.Strict.Diff's produced by +-- executing the Ledger rules. +-- +-- See "Ouroboros.Consensus.Storage.LedgerDB.BackingStore" for the +-- implementations provided. +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API ( + -- * FileSystem newtypes + LiveLMDBFS (..) + , SnapshotsFS (..) + -- * Backing store + , BackingStore (..) + , BackingStore' + , DiffsToFlush (..) + , InitFrom (..) + , LedgerBackingStore + -- * Value handle + , BackingStoreValueHandle (..) + , BackingStoreValueHandle' + , LedgerBackingStoreValueHandle + , castBackingStoreValueHandle + , withBsValueHandle + -- * Query + , RangeQuery (..) + -- * Statistics + , Statistics (..) + -- * Tracing + , BackingStoreTrace (..) + , BackingStoreValueHandleTrace (..) + -- * 🧪 Testing + , bsRead + ) where + +import Cardano.Slotting.Slot (SlotNo, WithOrigin (..)) +import GHC.Generics +import NoThunks.Class (OnlyCheckWhnfNamed (..)) +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Util.IOLike +import System.FS.API +import qualified System.FS.API.Types as FS + +-- | The LedgerDB file system. Typically pointing to @/ledger@. +newtype SnapshotsFS m = SnapshotsFS { snapshotsFs :: SomeHasFS m } + deriving (Generic, NoThunks) + +-- | The LMDB file system. Typically pointing to @/lmdb@. 
+newtype LiveLMDBFS m = LiveLMDBFS { liveLMDBFs :: SomeHasFS m } + deriving (Generic, NoThunks) + +{------------------------------------------------------------------------------- + Backing store interface +-------------------------------------------------------------------------------} + +-- | A container for differences that are intended to be flushed to a +-- 'BackingStore' +data DiffsToFlush l = DiffsToFlush { + -- | The set of differences that should be flushed into the 'BackingStore' + toFlushDiffs :: !(LedgerTables l DiffMK) + -- | At which slot the diffs were split. This must be the slot of the state + -- considered as "last flushed" in the kept 'DbChangelog' + , toFlushSlot :: !SlotNo + } + +data BackingStore m keys values diff = BackingStore { + -- | Close the backing store + -- + -- Other methods throw exceptions if called on a closed store. 'bsClose' + -- itself is idempotent. + bsClose :: !(m ()) + -- | Create a persistent copy + -- + -- Each backing store implementation will offer a way to initialize itself + -- from such a path. + -- + -- The destination path must not already exist. After this operation, it + -- will be a directory. + , bsCopy :: !(FS.FsPath -> m ()) + -- | Open a 'BackingStoreValueHandle' capturing the current value of the + -- entire database + , bsValueHandle :: !(m (BackingStoreValueHandle m keys values)) + -- | Apply a valid diff to the contents of the backing store + , bsWrite :: !(SlotNo -> diff -> m ()) + } + +deriving via OnlyCheckWhnfNamed "BackingStore" (BackingStore m keys values diff) + instance NoThunks (BackingStore m keys values diff) + +type LedgerBackingStore m l = + BackingStore m + (LedgerTables l KeysMK) + (LedgerTables l ValuesMK) + (LedgerTables l DiffMK) + +type BackingStore' m blk = LedgerBackingStore m (ExtLedgerState blk) + +-- | Choose how to initialize the backing store +data InitFrom values = + -- | Initialize from a set of values, at the given slot. + InitFromValues !(WithOrigin SlotNo) !values + -- | Use a snapshot at the given path to overwrite the set of values in the + -- opened database. + | InitFromCopy !FS.FsPath + +{------------------------------------------------------------------------------- + Value handles +-------------------------------------------------------------------------------} + +-- | An ephemeral handle to an immutable value of the entire database + -- + -- The performance cost is usually minimal unless this handle is held open too +-- long. We expect clients of the 'BackingStore' to not retain handles for a +-- long time. +data BackingStoreValueHandle m keys values = BackingStoreValueHandle { + -- | At which slot this handle was created + bsvhAtSlot :: !(WithOrigin SlotNo) + -- | Close the handle + -- + -- Other methods throw exceptions if called on a closed handle. 'bsvhClose' + -- itself is idempotent. + , bsvhClose :: !(m ()) + -- | See 'RangeQuery' + , bsvhRangeRead :: !(RangeQuery keys -> m values) + -- | Read the given keys from the handle + -- + -- Absent keys will merely not be present in the result instead of causing a + -- failure or an exception.
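(Editorial illustration, not part of the original patch: the absent-key behaviour of the read field that follows, where @vh@ is a hypothetical open value handle over tables containing only the key @k1@, and @keysFor@ is a hypothetical helper building a @KeysMK@ ledger table from a list of keys.)
-- bsvhRead vh (keysFor [k1, k2])   -- returns a value for @k1@ only; @k2@ is simply missing from the result
-- bsvhRead vh (keysFor [])         -- returns an empty table
In both cases the call succeeds; missing keys simply do not appear in the returned values.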
+ , bsvhRead :: !(keys -> m values) + -- | Retrieve statistics + , bsvhStat :: !(m Statistics) + } + +deriving via OnlyCheckWhnfNamed "BackingStoreValueHandle" (BackingStoreValueHandle m keys values) + instance NoThunks (BackingStoreValueHandle m keys values) + +type LedgerBackingStoreValueHandle m l = + BackingStoreValueHandle m + (LedgerTables l KeysMK) + (LedgerTables l ValuesMK) + +type BackingStoreValueHandle' m blk = LedgerBackingStoreValueHandle m (ExtLedgerState blk) + +castBackingStoreValueHandle :: + Functor m + => (values -> values') + -> (keys' -> keys) + -> BackingStoreValueHandle m keys values + -> BackingStoreValueHandle m keys' values' +castBackingStoreValueHandle f g bsvh = + BackingStoreValueHandle { + bsvhAtSlot + , bsvhClose + , bsvhRangeRead = \(RangeQuery prev count) -> + fmap f . bsvhRangeRead $ RangeQuery (fmap g prev) count + , bsvhRead = fmap f . bsvhRead . g + , bsvhStat + } + where + BackingStoreValueHandle { + bsvhClose + , bsvhAtSlot + , bsvhRangeRead + , bsvhRead + , bsvhStat + } = bsvh + +-- | A combination of 'bsValueHandle' and 'bsvhRead' +bsRead :: + MonadThrow m + => BackingStore m keys values diff + -> keys + -> m (WithOrigin SlotNo, values) +bsRead store keys = withBsValueHandle store $ \vh -> do + values <- bsvhRead vh keys + pure (bsvhAtSlot vh, values) + +-- | An 'IOLike.bracket'ed 'bsValueHandle' +withBsValueHandle :: + MonadThrow m + => BackingStore m keys values diff + -> (BackingStoreValueHandle m keys values -> m a) + -> m a +withBsValueHandle store = + bracket + (bsValueHandle store) + bsvhClose + +{------------------------------------------------------------------------------- + Query +-------------------------------------------------------------------------------} + +-- | The arguments for a query to the backing store. It is up to the particular +-- function that is performing the query to construct a value of this type, run +-- the query and, if appropriate, repeat this process to do a subsequent query. +data RangeQuery keys = RangeQuery { + -- | The result of this range query begins at the first key that is strictly + -- greater than the greatest key in 'rqPrev'. + -- + -- If the given set of keys is 'Just' but contains no keys, then the query + -- will return no results. (This is the steady-state once a looping range + -- query reaches the end of the table.) + rqPrev :: !(Maybe keys) + -- | Roughly how many values to read. + -- + -- The query may return a different number of values than this even if it + -- has not reached the last key. The only crucial invariant is that the + -- query only returns an empty map if there are no more keys to read on + -- disk. + -- + -- FIXME: #4398 can we satisfy this invariant if we read keys from disk + -- but all of them were deleted in the changelog? + , rqCount :: !Int + } + deriving stock (Show, Eq) + +{------------------------------------------------------------------------------- + Statistics +-------------------------------------------------------------------------------} + +-- | Statistics for a key-value store. +-- +-- Using 'bsvhStat' on a value handle only provides statistics for the on-disk +-- state of a key-value store. Combine this with information from a +-- 'DbChangelog' to obtain statistics about a "logical" state of the key-value +-- store. See 'getStatistics'. +data Statistics = Statistics { + -- | The last slot number for which key-value pairs were stored.
+ -- + -- INVARIANT: the 'sequenceNumber' returned by using 'bsvhStat' on a value + -- handle should match 'bsvhAtSlot' for that same value handle. + sequenceNumber :: !(WithOrigin SlotNo) + -- | The total number of key-value pair entries that are stored. + , numEntries :: !Int + } + deriving stock (Show, Eq) + +{------------------------------------------------------------------------------- + Tracing +-------------------------------------------------------------------------------} + +data BackingStoreTrace = + BSOpening + | BSOpened !(Maybe FS.FsPath) + | BSInitialisingFromCopy !FS.FsPath + | BSInitialisedFromCopy !FS.FsPath + | BSInitialisingFromValues !(WithOrigin SlotNo) + | BSInitialisedFromValues !(WithOrigin SlotNo) + | BSClosing + | BSAlreadyClosed + | BSClosed + | BSCopying !FS.FsPath + | BSCopied !FS.FsPath + | BSCreatingValueHandle + | BSValueHandleTrace !(Maybe Int) !BackingStoreValueHandleTrace + | BSCreatedValueHandle + | BSWriting !SlotNo + | BSWritten !(WithOrigin SlotNo) !SlotNo + deriving (Eq, Show) + +data BackingStoreValueHandleTrace = + BSVHClosing + | BSVHAlreadyClosed + | BSVHClosed + | BSVHRangeReading + | BSVHRangeRead + | BSVHReading + | BSVHRead + | BSVHStatting + | BSVHStatted + deriving (Eq, Show) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/InMemory.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/InMemory.hs new file mode 100644 index 0000000000..3b3c56f787 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/InMemory.hs @@ -0,0 +1,307 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | An implementation of a 'BackingStore' using a TVar. This is the +-- implementation known as \"InMemory\". +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.InMemory ( + -- * Constructor + newInMemoryBackingStore + -- * Errors + , InMemoryBackingStoreExn (..) + , InMemoryBackingStoreInitExn (..) 
+ ) where + +import Cardano.Binary as CBOR +import Cardano.Slotting.Slot (SlotNo, WithOrigin (..)) +import qualified Codec.CBOR.Read as CBOR +import qualified Codec.CBOR.Write as CBOR +import Control.Monad (join, unless, void, when) +import Control.Monad.Class.MonadThrow (catch) +import Control.Tracer (Tracer, traceWith) +import qualified Data.ByteString.Lazy as BSL +import qualified Data.Map.Strict as Map +import Data.Monoid (Sum (..)) +import qualified Data.Set as Set +import Data.String (fromString) +import GHC.Generics +import Ouroboros.Consensus.Ledger.Basics +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API +import Ouroboros.Consensus.Util.IOLike (Exception, IOLike, + MonadSTM (STM, atomically), MonadThrow (throwIO), NoThunks, + StrictTVar, newTVarIO, readTVar, throwSTM, writeTVar) +import Prelude hiding (lookup) +import System.FS.API + (HasFS (createDirectory, doesDirectoryExist, doesFileExist, mkFsErrorPath), + SomeHasFS (SomeHasFS), withFile) +import System.FS.API.Lazy (hGetAll, hPutAll) +import System.FS.API.Types (AllowExisting (MustBeNew), FsErrorPath, + FsPath (fsPathToList), OpenMode (ReadMode, WriteMode), + fsPathFromList) + +{------------------------------------------------------------------------------- + An in-memory backing store +-------------------------------------------------------------------------------} + +data BackingStoreContents m l = + BackingStoreContentsClosed + | BackingStoreContents + !(WithOrigin SlotNo) + !(LedgerTables l ValuesMK) + deriving (Generic) + +deriving instance ( NoThunks (Key l) + , NoThunks (Value l) + ) => NoThunks (BackingStoreContents m l) + +-- | Use a 'TVar' as a trivial backing store +newInMemoryBackingStore :: + forall l m. + ( IOLike m + , CanSerializeLedgerTables l + , HasLedgerTables l + ) + => Tracer m BackingStoreTrace + -> SnapshotsFS m + -> InitFrom (LedgerTables l ValuesMK) + -> m (LedgerBackingStore m l) +newInMemoryBackingStore tracer (SnapshotsFS (SomeHasFS fs)) initialization = do + traceWith tracer BSOpening + ref <- do + (slot, values) <- case initialization of + InitFromCopy path -> do + traceWith tracer $ BSInitialisingFromCopy path + tvarFileExists <- doesFileExist fs (extendPath path) + unless tvarFileExists $ + throwIO . 
StoreDirIsIncompatible $ mkFsErrorPath fs path + withFile fs (extendPath path) ReadMode $ \h -> do + bs <- hGetAll fs h + case CBOR.deserialiseFromBytes ((,) <$> CBOR.fromCBOR <*> valuesMKDecoder) bs of + Left err -> throwIO $ InMemoryBackingStoreDeserialiseExn err + Right (extra, x) -> do + unless (BSL.null extra) $ throwIO InMemoryIncompleteDeserialiseExn + traceWith tracer $ BSInitialisedFromCopy path + pure x + InitFromValues slot values -> do + traceWith tracer $ BSInitialisingFromValues slot + pure (slot, values) + newTVarIO $ BackingStoreContents slot values + traceWith tracer $ BSOpened Nothing + pure BackingStore { + bsClose = do + traceWith tracer BSClosing + catch + (atomically $ do + guardClosed ref + writeTVar ref BackingStoreContentsClosed + ) + (\case + InMemoryBackingStoreClosedExn -> traceWith tracer BSAlreadyClosed + e -> throwIO e + ) + traceWith tracer BSClosed + , bsCopy = \path -> do + traceWith tracer $ BSCopying path + join $ atomically $ do + readTVar ref >>= \case + BackingStoreContentsClosed -> + throwSTM InMemoryBackingStoreClosedExn + BackingStoreContents slot values -> pure $ do + exists <- doesDirectoryExist fs path + when exists $ throwIO InMemoryBackingStoreDirectoryExists + createDirectory fs path + withFile fs (extendPath path) (WriteMode MustBeNew) $ \h -> + void $ hPutAll fs h + $ CBOR.toLazyByteString + $ CBOR.toCBOR slot <> valuesMKEncoder values + traceWith tracer $ BSCopied path + , bsValueHandle = do + traceWith tracer BSCreatingValueHandle + vh <- join $ atomically $ do + readTVar ref >>= \case + BackingStoreContentsClosed -> + throwSTM InMemoryBackingStoreClosedExn + BackingStoreContents slot values -> pure $ do + refHandleClosed <- newTVarIO False + pure $ BackingStoreValueHandle { + bsvhAtSlot = slot + , bsvhClose = do + traceWith tracer $ BSValueHandleTrace Nothing BSVHClosing + catch + (atomically $ do + guardClosed ref + guardHandleClosed refHandleClosed + writeTVar refHandleClosed True + ) + (\case + InMemoryBackingStoreClosedExn -> + traceWith tracer BSAlreadyClosed + InMemoryBackingStoreValueHandleClosedExn -> + traceWith tracer (BSValueHandleTrace Nothing BSVHAlreadyClosed) + e -> + throwIO e + ) + traceWith tracer $ BSValueHandleTrace Nothing BSVHClosed + , bsvhRangeRead = \rq -> do + traceWith tracer $ BSValueHandleTrace Nothing BSVHRangeReading + r <- atomically $ do + guardClosed ref + guardHandleClosed refHandleClosed + pure $ rangeRead rq values + traceWith tracer $ BSValueHandleTrace Nothing BSVHRangeRead + pure r + , bsvhRead = \keys -> do + traceWith tracer $ BSValueHandleTrace Nothing BSVHReading + r <- atomically $ do + guardClosed ref + guardHandleClosed refHandleClosed + pure $ lookup keys values + traceWith tracer $ BSValueHandleTrace Nothing BSVHRead + pure r + , bsvhStat = do + traceWith tracer $ BSValueHandleTrace Nothing BSVHStatting + r <- atomically $ do + guardClosed ref + guardHandleClosed refHandleClosed + pure $ Statistics slot (count values) + traceWith tracer $ BSValueHandleTrace Nothing BSVHStatted + pure r + } + traceWith tracer BSCreatedValueHandle + pure vh + , bsWrite = \slot2 diff -> do + traceWith tracer $ BSWriting slot2 + slot1 <- atomically $ do + readTVar ref >>= \case + BackingStoreContentsClosed -> + throwSTM InMemoryBackingStoreClosedExn + BackingStoreContents slot1 values -> do + unless (slot1 <= At slot2) $ + throwSTM $ InMemoryBackingStoreNonMonotonicSeq (At slot2) slot1 + writeTVar ref $ + BackingStoreContents + (At slot2) + (forwardValues values diff) + pure slot1 + traceWith tracer $ 
BSWritten slot1 slot2 + } + where + extendPath path = + fsPathFromList $ fsPathToList path <> [fromString "tvar"] + + lookup :: LedgerTables l KeysMK + -> LedgerTables l ValuesMK + -> LedgerTables l ValuesMK + lookup = ltliftA2 lookup' + + lookup' :: + Ord k + => KeysMK k v + -> ValuesMK k v + -> ValuesMK k v + lookup' (KeysMK ks) (ValuesMK vs) = + ValuesMK (Map.restrictKeys vs ks) + + + rangeRead :: RangeQuery (LedgerTables l KeysMK) + -> LedgerTables l ValuesMK + -> LedgerTables l ValuesMK + rangeRead rq values = case rqPrev rq of + Nothing -> + ltmap (rangeRead0' (rqCount rq)) values + Just keys -> + ltliftA2 (rangeRead' (rqCount rq)) keys values + + rangeRead0' :: + Int + -> ValuesMK k v + -> ValuesMK k v + rangeRead0' n (ValuesMK vs) = + ValuesMK $ Map.take n vs + + rangeRead' :: + Ord k + => Int + -> KeysMK k v + -> ValuesMK k v + -> ValuesMK k v + rangeRead' n (KeysMK ks) (ValuesMK vs) = + case Set.lookupMax ks of + Nothing -> ValuesMK Map.empty + Just k -> ValuesMK $ Map.take n $ snd $ Map.split k vs + + forwardValues :: LedgerTables l ValuesMK + -> LedgerTables l DiffMK + -> LedgerTables l ValuesMK + forwardValues = ltliftA2 applyDiff_ + + applyDiff_ :: + Ord k + => ValuesMK k v + -> DiffMK k v + -> ValuesMK k v + applyDiff_ (ValuesMK values) (DiffMK diff) = + ValuesMK (Diff.applyDiff values diff) + + count :: LedgerTables l ValuesMK -> Int + count = getSum . ltcollapse . ltmap (K2 . count') + + count' :: ValuesMK k v -> Sum Int + count' (ValuesMK values) = Sum $ Map.size values + +guardClosed :: + IOLike m + => StrictTVar m (BackingStoreContents ks vs) + -> STM m () +guardClosed ref = readTVar ref >>= \case + BackingStoreContentsClosed -> throwSTM InMemoryBackingStoreClosedExn + BackingStoreContents _ _ -> pure () + +guardHandleClosed :: + IOLike m + => StrictTVar m Bool + -> STM m () +guardHandleClosed refHandleClosed = do + isClosed <- readTVar refHandleClosed + when isClosed $ throwSTM InMemoryBackingStoreValueHandleClosedExn + +{------------------------------------------------------------------------------- + Errors +-------------------------------------------------------------------------------} + +-- | Errors that the InMemory backing store can throw on runtime. +-- +-- __WARNING__: these errors will be thrown in IO as having a corrupt database +-- is critical for the functioning of Consensus. +data InMemoryBackingStoreExn = + InMemoryBackingStoreClosedExn + | InMemoryBackingStoreValueHandleClosedExn + | InMemoryBackingStoreDirectoryExists + | InMemoryBackingStoreNonMonotonicSeq !(WithOrigin SlotNo) !(WithOrigin SlotNo) + | InMemoryBackingStoreDeserialiseExn CBOR.DeserialiseFailure + | InMemoryIncompleteDeserialiseExn + deriving anyclass (Exception) + deriving stock (Show) + +-- | Errors that the InMemory backing store can throw on initialization. +-- +-- __WARNING__: these errors will be thrown in IO as having a corrupt database +-- is critical for the functioning of Consensus. +newtype InMemoryBackingStoreInitExn = + StoreDirIsIncompatible FsErrorPath + deriving anyclass (Exception) + +instance Show InMemoryBackingStoreInitExn where + show (StoreDirIsIncompatible p) = + "In-Memory database not found in the database directory: " + <> show p + <> ".\nPre-UTxO-HD and LMDB implementations are incompatible with the In-Memory \ + \ implementation. Please delete your ledger database directory." 
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB.hs new file mode 100644 index 0000000000..bffa557eee --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB.hs @@ -0,0 +1,716 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE OverloadedStrings #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeOperators #-} + +-- | A 'BackingStore' implementation based on [LMDB](http://www.lmdb.tech/doc/). +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB ( + -- * Opening a database + LMDBLimits (LMDBLimits, lmdbMapSize, lmdbMaxDatabases, lmdbMaxReaders) + , newLMDBBackingStore + -- * Errors + , LMDBErr (..) + -- * Internals exposed for @snapshot-converter@ + , DbState (..) + , LMDBMK (..) + , getDb + , initLMDBTable + , withDbStateRWMaybeNull + ) where + +import Cardano.Slotting.Slot (SlotNo, WithOrigin (At)) +import qualified Codec.Serialise as S (Serialise (..)) +import qualified Control.Concurrent.Class.MonadSTM.TVar as IOLike +import Control.Monad (forM_, unless, void, when) +import qualified Control.Monad.Class.MonadSTM as IOLike +import Control.Monad.IO.Class (MonadIO (liftIO)) +import qualified Control.Tracer as Trace +import Data.Functor (($>), (<&>)) +import Data.Functor.Contravariant ((>$<)) +import Data.Map (Map) +import qualified Data.Map.Strict as Map +import Data.Monoid (Sum (..)) +import qualified Data.Set as Set +import qualified Data.Text as Strict +import qualified Database.LMDB.Simple as LMDB +import qualified Database.LMDB.Simple.Cursor as LMDB.Cursor +import qualified Database.LMDB.Simple.Extra as LMDB +import qualified Database.LMDB.Simple.Internal as LMDB.Internal +import qualified Database.LMDB.Simple.TransactionHandle as TrH +import GHC.Generics (Generic) +import GHC.Stack (HasCallStack) +import Ouroboros.Consensus.Ledger.Tables +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API as API +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Bridge as Bridge +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Status (Status (..), StatusLock) +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Status as Status +import Ouroboros.Consensus.Util (foldlM') +import Ouroboros.Consensus.Util.IOLike (Exception (..), IOLike, MonadCatch (..), MonadThrow (..), bracket) +import qualified System.FS.API as FS + +{------------------------------------------------------------------------------- + Database definition +-------------------------------------------------------------------------------} + +-- | The LMDB database that underlies the backing store. +data Db m l = Db { + -- | The LMDB environment is a pointer to the directory that contains the + -- @`Db`@. + dbEnv :: !(LMDB.Environment LMDB.ReadWrite) + -- | The on-disk state of the @`Db`@. + -- + -- The state is kept in an LMDB table with only one key and one value: + -- The current sequence number of the @`Db`@.
+ , dbState :: !(LMDB.Database () DbState) + -- | The LMDB tables with the key-value stores. + , dbBackingTables :: !(LedgerTables l LMDBMK) + , dbFilePath :: !FilePath + , dbTracer :: !(Trace.Tracer m API.BackingStoreTrace) + -- | Status of the LMDB backing store. When 'Closed', all backing store + -- (value handle) operations will fail. + , dbStatusLock :: !(StatusLock m) + -- | Map of open value handles to cleanup actions. When closing the backing + -- store, these cleanup actions are used to ensure all value handles are + -- cleaned up. + -- + -- Note: why not use 'bsvhClose' here? We would get nested lock acquisition + -- on 'dbStatusLock', which causes a deadlock: + -- + -- * 'bsClose' acquires a write lock + -- + -- * 'bsvhClose' is called on a value handle + -- + -- * 'bsvhClose' tries to acquire a read lock, but it has to wait for + -- 'bsClose' to give up its write lock + , dbOpenHandles :: !(IOLike.TVar m (Map Int (Cleanup m))) + , dbNextId :: !(IOLike.TVar m Int) + } + +newtype LMDBLimits = MkLMDBLimits {unLMDBLimits :: LMDB.Limits} + deriving (Show, Eq) + +{-# COMPLETE LMDBLimits #-} +-- | Configuration to use for LMDB backing store initialisation. +-- +-- Keep the following in mind: +-- +-- * @'lmdbMapSize'@ should be a multiple of the OS page size. +-- +-- * @'lmdbMaxDatabases'@ should be set to at least 2, since the backing store +-- has 2 internal LMDB databases by default: 1 for the actual tables, and +-- 1 for the database state @'DbState'@. +pattern LMDBLimits :: Int -> Int -> Int -> LMDBLimits +pattern LMDBLimits{lmdbMapSize, lmdbMaxDatabases, lmdbMaxReaders} = + MkLMDBLimits LMDB.Limits { + LMDB.mapSize = lmdbMapSize + , LMDB.maxDatabases = lmdbMaxDatabases + , LMDB.maxReaders = lmdbMaxReaders + } + +-- | The database state consists of only the database sequence number @dbsSeq@. +-- @dbsSeq@ represents the slot up to which we have flushed changes to disk. +-- Note that we only flush changes to disk if they have become immutable. +newtype DbState = DbState { + dbsSeq :: WithOrigin SlotNo + } + deriving stock (Show, Generic) + deriving anyclass S.Serialise + +-- | A 'MapKind' that represents an LMDB database +data LMDBMK k v = LMDBMK String !(LMDB.Database k v) + +{------------------------------------------------------------------------------- + Low-level API +-------------------------------------------------------------------------------} + +getDb :: + LMDB.Internal.IsMode mode + => K2 String k v + -> LMDB.Transaction mode (LMDBMK k v) +getDb (K2 name) = LMDBMK name <$> LMDB.getDatabase (Just name) + +-- | @'rangeRead' count db codec ksMay@ performs a range read of @count@ values from +-- database @db@, starting from some key depending on @ksMay@. +-- +-- The @codec@ argument defines how to serialise/deserialise keys and values. +-- +-- A range read can return less than @count@ values if there are not enough +-- values to read. +-- +-- Note: See @`RangeQuery`@ for more information about range queries. In +-- particular, @'rqPrev'@ describes the role of @ksMay@. +-- +-- What the "first" key in the database is, and more generally in which order +-- keys are read, depends on the lexicographical ordering of the /serialised/ +-- keys. Care should be taken such that the @'Ord'@ instance for @k@ matches the +-- lexicographical ordering of the serialised keys, or the result of this +-- function will be unexpected. +rangeRead :: + forall k v mode.
Ord k + => Int + -> LMDBMK k v + -> CodecMK k v + -> (Maybe :..: KeysMK) k v + -> LMDB.Transaction mode (ValuesMK k v) +rangeRead count dbMK codecMK ksMK = + ValuesMK <$> case unComp2 ksMK of + Nothing -> runCursorHelper Nothing + Just (KeysMK ks) -> case Set.lookupMax ks of + Nothing -> pure mempty + Just lastExcludedKey -> + runCursorHelper $ Just (lastExcludedKey, LMDB.Cursor.Exclusive) + where + LMDBMK _ db = dbMK + + runCursorHelper :: + Maybe (k, LMDB.Cursor.Bound) -- ^ Lower bound on read range + -> LMDB.Transaction mode (Map k v) + runCursorHelper lb = + Bridge.runCursorAsTransaction' + (LMDB.Cursor.cgetMany lb count) + db + codecMK + +initLMDBTable :: + LMDBMK k v + -> CodecMK k v + -> ValuesMK k v + -> LMDB.Transaction LMDB.ReadWrite (EmptyMK k v) +initLMDBTable (LMDBMK tblName db) codecMK (ValuesMK utxoVals) = + EmptyMK <$ lmdbInitTable + where + lmdbInitTable = do + isEmpty <- LMDB.null db + unless isEmpty $ liftIO . throwIO $ LMDBErrInitialisingNonEmpty tblName + void $ Map.traverseWithKey + (Bridge.put codecMK db) + utxoVals + +readLMDBTable :: + Ord k + => LMDBMK k v + -> CodecMK k v + -> KeysMK k v + -> LMDB.Transaction mode (ValuesMK k v) +readLMDBTable (LMDBMK _ db) codecMK (KeysMK keys) = + ValuesMK <$> lmdbReadTable + where + lmdbReadTable = foldlM' go Map.empty (Set.toList keys) + where + go m k = Bridge.get codecMK db k <&> \case + Nothing -> m + Just v -> Map.insert k v m + +writeLMDBTable :: + LMDBMK k v + -> CodecMK k v + -> DiffMK k v + -> LMDB.Transaction LMDB.ReadWrite (EmptyMK k v) +writeLMDBTable (LMDBMK _ db) codecMK (DiffMK d) = + EmptyMK <$ lmdbWriteTable + where + lmdbWriteTable = void $ Diff.traverseDeltaWithKey_ go d + where + go k de = case de of + Diff.Delete -> void $ Bridge.delete codecMK db k + Diff.Insert v -> Bridge.put codecMK db k v + +{------------------------------------------------------------------------------- + Db state +-------------------------------------------------------------------------------} + +readDbStateMaybeNull :: + LMDB.Database () DbState + -> LMDB.Transaction mode (Maybe DbState) +readDbStateMaybeNull db = LMDB.get db () + +readDbState :: + LMDB.Database () DbState + -> LMDB.Transaction mode DbState +readDbState db = readDbStateMaybeNull db >>= maybe (liftIO . throwIO $ LMDBErrNoDbState) pure + +withDbStateRW :: + LMDB.Database () DbState + -> (DbState -> LMDB.Transaction LMDB.ReadWrite (a, DbState)) + -> LMDB.Transaction LMDB.ReadWrite a +withDbStateRW db f = withDbStateRWMaybeNull db $ maybe (liftIO . throwIO $ LMDBErrNoDbState) f + +withDbStateRWMaybeNull :: + LMDB.Database () DbState + -> (Maybe DbState -> LMDB.Transaction LMDB.ReadWrite (a, DbState)) + -> LMDB.Transaction LMDB.ReadWrite a +withDbStateRWMaybeNull db f = + readDbStateMaybeNull db >>= f >>= \(r, sNew) -> LMDB.put db () (Just sNew) $> r + +{------------------------------------------------------------------------------- + Guards +-------------------------------------------------------------------------------} + +data GuardDbDir = DirMustExist | DirMustNotExist + +-- | Guard for the existence/non-existence of a database directory, +-- and create it if missing. 
+guardDbDir :: + (MonadIO m, IOLike m) + => GuardDbDir + -> FS.SomeHasFS m + -> FS.FsPath + -> m FilePath +guardDbDir mustExistDir (FS.SomeHasFS fs) path = do + fileEx <- FS.doesFileExist fs path + when fileEx $ + throwIO $ LMDBErrNotADir path + dirEx <- FS.doesDirectoryExist fs path + lmdbFileExists <- FS.doesFileExist fs path { FS.fsPathToList = FS.fsPathToList path ++ [Strict.pack "data.mdb"] } + filepath <- FS.unsafeToFilePath fs path + case dirEx of + True | DirMustNotExist <- mustExistDir -> throwIO $ LMDBErrDirExists filepath + | not lmdbFileExists -> throwIO $ LMDBErrDirIsNotLMDB filepath + False | DirMustExist <- mustExistDir -> throwIO $ LMDBErrDirDoesntExist filepath + _ -> pure () + FS.createDirectoryIfMissing fs True path + pure filepath + +-- | Same as @`guardDbDir`@, but retries the guard if we can make meaningful +-- changes to the filesystem before we perform the retry. +-- +-- Note: We only retry if a database directory exists while it shouldn't. In +-- this case, we remove the directory recursively before retrying the guard. +-- This is necessary for initialisation of the LMDB backing store, since the +-- (non-snapshot) tables will probably still be on-disk. These tables are not +-- removed when stopping the node, so they should be "overwritten". +guardDbDirWithRetry :: + (MonadIO m, IOLike m) + => GuardDbDir + -> FS.SomeHasFS m + -> FS.FsPath + -> m FilePath +guardDbDirWithRetry gdd shfs@(FS.SomeHasFS fs) path = + handle retryHandler (guardDbDir gdd shfs path) + where + retryHandler e = case (gdd, e) of + (DirMustNotExist, LMDBErrDirExists _path) -> do + FS.removeDirectoryRecursive fs path + guardDbDir DirMustNotExist shfs path + _ -> throwIO e + +{------------------------------------------------------------------------------- + Initialize an LMDB +-------------------------------------------------------------------------------} + +-- | Initialise an LMDB database from the provided values. +initFromVals :: + (HasLedgerTables l, CanSerializeLedgerTables l, MonadIO m) + => Trace.Tracer m API.BackingStoreTrace + -> WithOrigin SlotNo + -- ^ The slot number up to which the ledger tables contain values. + -> LedgerTables l ValuesMK + -- ^ The ledger tables to initialise the LMDB database tables with. + -> LMDB.Environment LMDB.Internal.ReadWrite + -- ^ The LMDB environment. + -> LMDB.Database () DbState + -- ^ The state of the tables we are going to initialize the db with. + -> LedgerTables l LMDBMK + -> m () +initFromVals tracer dbsSeq vals env st backingTables = do + Trace.traceWith tracer $ API.BSInitialisingFromValues dbsSeq + liftIO $ LMDB.readWriteTransaction env $ + withDbStateRWMaybeNull st $ \case + Nothing -> ltzipWith3A initLMDBTable backingTables codecLedgerTables vals + $> ((), DbState{dbsSeq}) + Just _ -> liftIO . throwIO $ LMDBErrInitialisingAlreadyHasState + Trace.traceWith tracer $ API.BSInitialisedFromValues dbsSeq + +-- | Initialise an LMDB database from an existing LMDB database. +initFromLMDBs :: + (MonadIO m, IOLike m) + => Trace.Tracer m API.BackingStoreTrace + -> LMDBLimits + -- ^ Configuration for the LMDB database that we initialise from. + -> API.SnapshotsFS m + -- ^ Abstraction over the filesystem. + -> FS.FsPath + -- ^ The path that contains the LMDB database that we want to initialise from. + -> API.LiveLMDBFS m + -- ^ Abstraction over the filesystem. + -> FS.FsPath + -- ^ The path where the new LMDB database should be initialised.
+ -> m () +initFromLMDBs tracer limits (API.SnapshotsFS shfsFrom@(FS.SomeHasFS fsFrom)) from0 (API.LiveLMDBFS shfsTo) to0 = do + Trace.traceWith tracer $ API.BSInitialisingFromCopy from0 + from <- guardDbDir DirMustExist shfsFrom from0 + -- On Windows, if we don't choose the mapsize carefully it will make the + -- snapshot grow. Therefore we are using the current filesize as mapsize + -- when opening the snapshot to avoid this. + stat <- FS.withFile fsFrom (from0 { FS.fsPathToList = FS.fsPathToList from0 ++ [Strict.pack "data.mdb"] }) FS.ReadMode (FS.hGetSize fsFrom) + to <- guardDbDirWithRetry DirMustNotExist shfsTo to0 + bracket + (liftIO $ LMDB.openEnvironment from ((unLMDBLimits limits) { LMDB.mapSize = fromIntegral stat })) + (liftIO . LMDB.closeEnvironment) + (flip (lmdbCopy from0 tracer) to) + Trace.traceWith tracer $ API.BSInitialisedFromCopy from0 + +-- | Copy an existing LMDB database to a given directory. +lmdbCopy :: MonadIO m + => FS.FsPath + -> Trace.Tracer m API.BackingStoreTrace + -> LMDB.Environment LMDB.ReadWrite + -- ^ The environment in which the LMDB database lives. + -> FilePath + -- ^ The path where the copy should reside. + -> m () +lmdbCopy from0 tracer e to = do + Trace.traceWith tracer $ API.BSCopying from0 + liftIO $ LMDB.copyEnvironment e to + Trace.traceWith tracer $ API.BSCopied from0 + +-- | Initialise a backing store. +newLMDBBackingStore :: + forall m l. (HasCallStack, HasLedgerTables l, CanSerializeLedgerTables l, MonadIO m, IOLike m) + => Trace.Tracer m API.BackingStoreTrace + -> LMDBLimits + -- ^ Configuration parameters for the LMDB database that we + -- initialise. In case we initialise the LMDB database from + -- an existing LMDB database, we use these same configuration parameters + -- to open the existing LMDB database. + -> API.LiveLMDBFS m + -- ^ The FS for the LMDB live database + -> API.SnapshotsFS m + -> API.InitFrom (LedgerTables l ValuesMK) + -> m (API.LedgerBackingStore m l) +newLMDBBackingStore dbTracer limits liveFS@(API.LiveLMDBFS liveFS') snapFS@(API.SnapshotsFS snapFS') initFrom = do + Trace.traceWith dbTracer API.BSOpening + + db@Db { dbEnv + , dbState + , dbBackingTables + } <- createOrGetDB + + maybePopulate dbEnv dbState dbBackingTables + + Trace.traceWith dbTracer $ API.BSOpened $ Just path + + pure $ mkBackingStore db + where + + path = FS.mkFsPath ["tables"] + + createOrGetDB :: m (Db m l) + createOrGetDB = do + + dbOpenHandles <- IOLike.newTVarIO Map.empty + dbStatusLock <- Status.new Open + + -- get the filepath for this db creates the directory if appropriate + dbFilePath <- guardDbDirWithRetry DirMustNotExist liveFS' path + + -- copy from another lmdb path if appropriate + case initFrom of + API.InitFromCopy fp -> initFromLMDBs dbTracer limits snapFS fp liveFS path + _ -> pure () + + -- open this database + dbEnv <- liftIO $ LMDB.openEnvironment dbFilePath (unLMDBLimits limits) + + -- The LMDB.Database that holds the @`DbState`@ (i.e. 
sequence number) + -- This transaction must be read-write because on initialisation it creates the database + dbState <- liftIO $ LMDB.readWriteTransaction dbEnv $ LMDB.getDatabase (Just "_dbstate") + + -- Here we get the LMDB.Databases for the tables of the ledger state + -- Must be read-write transaction because tables may need to be created + dbBackingTables <- liftIO $ LMDB.readWriteTransaction dbEnv $ + lttraverse getDb (ltpure $ K2 "utxo") + + dbNextId <- IOLike.newTVarIO 0 + + pure $ Db { dbEnv + , dbState + , dbBackingTables + , dbFilePath + , dbTracer + , dbStatusLock + , dbOpenHandles + , dbNextId + } + + maybePopulate :: LMDB.Internal.Environment LMDB.Internal.ReadWrite + -> LMDB.Internal.Database () DbState + -> LedgerTables l LMDBMK + -> m () + maybePopulate dbEnv dbState dbBackingTables = do + -- now initialise those tables if appropriate + case initFrom of + API.InitFromValues slot vals -> initFromVals dbTracer slot vals dbEnv dbState dbBackingTables + _ -> pure () + + mkBackingStore :: HasCallStack => Db m l -> API.LedgerBackingStore m l + mkBackingStore db = + let bsClose :: m () + bsClose = Status.withWriteAccess' dbStatusLock traceAlreadyClosed $ do + Trace.traceWith dbTracer API.BSClosing + openHandles <- IOLike.readTVarIO dbOpenHandles + forM_ openHandles runCleanup + IOLike.atomically $ IOLike.writeTVar dbOpenHandles mempty + liftIO $ LMDB.closeEnvironment dbEnv + Trace.traceWith dbTracer API.BSClosed + pure ((), Closed) + where + traceAlreadyClosed = Trace.traceWith dbTracer API.BSAlreadyClosed + + bsCopy bsp = Status.withReadAccess dbStatusLock LMDBErrClosed $ do + to <- guardDbDir DirMustNotExist snapFS' bsp + lmdbCopy path dbTracer dbEnv to + + bsValueHandle = Status.withReadAccess dbStatusLock LMDBErrClosed $ do + mkLMDBBackingStoreValueHandle db + + bsWrite :: SlotNo -> LedgerTables l DiffMK -> m () + bsWrite slot diffs = do + Trace.traceWith dbTracer $ API.BSWriting slot + Status.withReadAccess dbStatusLock LMDBErrClosed $ do + oldSlot <- liftIO $ LMDB.readWriteTransaction dbEnv $ withDbStateRW dbState $ \s@DbState{dbsSeq} -> do + unless (dbsSeq <= At slot) $ liftIO . throwIO $ LMDBErrNonMonotonicSeq (At slot) dbsSeq + void $ ltzipWith3A writeLMDBTable dbBackingTables codecLedgerTables diffs + pure (dbsSeq, s {dbsSeq = At slot}) + Trace.traceWith dbTracer $ API.BSWritten oldSlot slot + + in API.BackingStore { API.bsClose = bsClose + , API.bsCopy = bsCopy + , API.bsValueHandle = bsValueHandle + , API.bsWrite = bsWrite + } + + where + Db { dbEnv + , dbState + , dbBackingTables + , dbStatusLock + , dbOpenHandles + } = db + +-- | Create a backing store value handle that has a consistent view of the +-- current database state (i.e., the database contents, not to be confused +-- with 'DbState'). +mkLMDBBackingStoreValueHandle :: + forall l m. + (HasLedgerTables l, CanSerializeLedgerTables l, MonadIO m, IOLike m, HasCallStack) + => Db m l + -- ^ The LMDB database for which the backing store value handle is + -- created. 
+ -> m (API.LedgerBackingStoreValueHandle m l) +mkLMDBBackingStoreValueHandle db = do + vhId <- IOLike.atomically $ do + vhId <- IOLike.readTVar dbNextId + IOLike.modifyTVar' dbNextId (+1) + pure vhId + + let + dbEnvRo = LMDB.readOnlyEnvironment dbEnv + tracer = API.BSValueHandleTrace (Just vhId) >$< dbTracer + + Trace.traceWith dbTracer API.BSCreatingValueHandle + + trh <- liftIO $ TrH.newReadOnly dbEnvRo + mbInitSlot <- liftIO $ TrH.submitReadOnly trh $ readDbStateMaybeNull dbState + initSlot <- liftIO $ maybe (throwIO LMDBErrUnableToReadSeqNo) (pure . dbsSeq) mbInitSlot + + vhStatusLock <- Status.new Open + + let + -- | Clean up a backing store value handle by committing its transaction + -- handle. + cleanup :: Cleanup m + cleanup = Cleanup $ + liftIO $ TrH.commit trh + + bsvhClose :: m () + bsvhClose = + Status.withReadAccess' dbStatusLock traceAlreadyClosed $ do + Status.withWriteAccess' vhStatusLock traceTVHAlreadyClosed $ do + Trace.traceWith tracer API.BSVHClosing + runCleanup cleanup + IOLike.atomically $ IOLike.modifyTVar' dbOpenHandles (Map.delete vhId) + Trace.traceWith tracer API.BSVHClosed + pure ((), Closed) + where + traceAlreadyClosed = Trace.traceWith dbTracer API.BSAlreadyClosed + traceTVHAlreadyClosed = Trace.traceWith tracer API.BSVHAlreadyClosed + + bsvhRead :: LedgerTables l KeysMK -> m (LedgerTables l ValuesMK) + bsvhRead keys = + Status.withReadAccess dbStatusLock LMDBErrClosed $ do + Status.withReadAccess vhStatusLock (LMDBErrNoValueHandle vhId) $ do + Trace.traceWith tracer API.BSVHReading + res <- liftIO $ TrH.submitReadOnly trh (ltzipWith3A readLMDBTable dbBackingTables codecLedgerTables keys) + Trace.traceWith tracer API.BSVHRead + pure res + + bsvhRangeRead :: + API.RangeQuery (LedgerTables l KeysMK) + -> m (LedgerTables l ValuesMK) + bsvhRangeRead rq = + Status.withReadAccess dbStatusLock LMDBErrClosed $ do + Status.withReadAccess vhStatusLock (LMDBErrNoValueHandle vhId) $ do + Trace.traceWith tracer API.BSVHRangeReading + + let + outsideIn :: + Maybe (LedgerTables l mk1) + -> LedgerTables l (Maybe :..: mk1) + outsideIn Nothing = ltpure (Comp2 Nothing) + outsideIn (Just tables) = ltmap (Comp2 . Just) tables + + transaction = + ltzipWith3A + (rangeRead rqCount) + dbBackingTables + codecLedgerTables + (outsideIn rqPrev) + + res <- liftIO $ TrH.submitReadOnly trh transaction + Trace.traceWith tracer API.BSVHRangeRead + pure res + where + API.RangeQuery rqPrev rqCount = rq + + bsvhStat :: m API.Statistics + bsvhStat = + Status.withReadAccess dbStatusLock LMDBErrClosed $ do + Status.withReadAccess vhStatusLock (LMDBErrNoValueHandle vhId) $ do + Trace.traceWith tracer API.BSVHStatting + let transaction = do + DbState{dbsSeq} <- readDbState dbState + constn <- lttraverse (\(LMDBMK _ dbx) -> K2 <$> LMDB.size dbx) dbBackingTables + let n = getSum $ ltcollapse $ ltmap (K2 . Sum . unK2) constn + pure $ API.Statistics dbsSeq n + res <- liftIO $ TrH.submitReadOnly trh transaction + Trace.traceWith tracer API.BSVHStatted + pure res + + bsvh = API.BackingStoreValueHandle { API.bsvhAtSlot = initSlot + , API.bsvhClose = bsvhClose + , API.bsvhRead = bsvhRead + , API.bsvhRangeRead = bsvhRangeRead + , API.bsvhStat = bsvhStat + } + + IOLike.atomically $ IOLike.modifyTVar' dbOpenHandles (Map.insert vhId cleanup) + + Trace.traceWith dbTracer API.BSCreatedValueHandle + pure bsvh + + where + Db { dbEnv + , dbTracer + , dbState + , dbOpenHandles + , dbBackingTables + , dbNextId + , dbStatusLock + } = db + +-- | A monadic action used for cleaning up resources. 
+newtype Cleanup m = Cleanup { runCleanup :: m () } + +{------------------------------------------------------------------------------- + Errors +-------------------------------------------------------------------------------} + +-- | Errors that can be thrown by LMDB. +-- +-- __WARNING__: these errors will be thrown in IO as having a corrupt database +-- is critical for the functioning of Consensus. +data LMDBErr = + -- | The database state can not be found on-disk. + LMDBErrNoDbState + -- | The sequence number of a @`Db`@ should be monotonically increasing + -- across calls to @`bsWrite`@, since we use @`bsWrite`@ to flush + -- /immutable/ changes. That is, we can only flush with a newer sequence + -- number because the changes should be /immutable/. Note that this does + -- not mean that values can not be changed in the future, only that we + -- can not change values in the past. + | LMDBErrNonMonotonicSeq !(WithOrigin SlotNo) !(WithOrigin SlotNo) + -- | The database table that is being initialised is non-empty. + | LMDBErrInitialisingNonEmpty !String + -- | The database that is being initialized already had a DbState table + | LMDBErrInitialisingAlreadyHasState + -- | Trying to use a non-existing value handle. + | LMDBErrNoValueHandle !Int + -- | Couldn't create a value handle because we couldn't read the sequence + -- number + | LMDBErrUnableToReadSeqNo + -- | Failed to read a value from a database table. + | LMDBErrBadRead + -- | Failed to read a range of values from a database table. + | LMDBErrBadRangeRead + -- | A database directory should not exist already. + | LMDBErrDirExists !FilePath + -- | A database directory should exist already. + | LMDBErrDirDoesntExist !FilePath + -- | The directory exists but is not an LMDB directory! + | LMDBErrDirIsNotLMDB !FilePath + -- | What should be a directory is in fact a file + | LMDBErrNotADir !FS.FsPath + -- | The database has been closed, so all backing store operations should + -- throw an error. + | LMDBErrClosed + +instance Exception LMDBErr + +-- | Show instance for pretty printing @`LMDBErr`@s as error messages that +-- include: (i) an indication of the probable cause of the error, and +-- (ii) a descriptive error message for the specific @`LMDBErr`@. +instance Show LMDBErr where + show dbErr = mconcat + [ "[LMDB-ERROR] " + , "The LMDB Backing store has encountered a fatal exception. " + , "Possibly, the LMDB database is corrupted.\n" + , "[ERROR-MSG] " + , prettyPrintLMDBErr dbErr + ] + +-- | Pretty print a @`LMDBErr`@ with a descriptive error message. +prettyPrintLMDBErr :: LMDBErr -> String +prettyPrintLMDBErr = \case + LMDBErrNoDbState -> + "Can not find the database state on-disk." + LMDBErrNonMonotonicSeq s1 s2 -> + "Trying to write to the database with a non-monotonic sequence number: " + <> showParen True (shows s1) "" + <> " is not <= " + <> showParen True (shows s2) "" + LMDBErrInitialisingNonEmpty s -> + "The database table that is being initialised is non-empty: " <> s + LMDBErrInitialisingAlreadyHasState -> + "The database contains no values but still has a table with a sequence number." + LMDBErrNoValueHandle vh_id -> + "Trying to use non-existing value handle: " <> show vh_id + LMDBErrUnableToReadSeqNo -> + "Reading the sequence number failed thus we couldn't create a value handle." + LMDBErrBadRead -> + "Failed to read a value from a database table." + LMDBErrBadRangeRead -> + "Failed to read a range of values from a database table." 
+ LMDBErrDirExists path -> + "Database directory should not exist already: " <> show path + LMDBErrDirDoesntExist path -> + "Database directory should already exist: " <> show path + LMDBErrDirIsNotLMDB path -> + "Database directory doesn't contain an LMDB database: " + <> show path + <> "\nPre-UTxO-HD and In-Memory implementations are incompatible \ + \ with the LMDB implementation, please delete your ledger database \ + \ if you want to run with LMDB" + LMDBErrNotADir path -> + "The path " <> show path <> " should be a directory but it is a file instead." + LMDBErrClosed -> "The database has been closed." diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Bridge.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Bridge.hs new file mode 100644 index 0000000000..8c76878d75 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Bridge.hs @@ -0,0 +1,179 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE Rank2Types #-} + +{-| Alternatives to LMDB operations that do not rely on @'Serialise'@ instances + + We cannot (easily and without runtime overhead) satisfy the @'Serialise'@ + constraints that the @lmdb-simple@ operations require. We have access to the + codification and decodification functions provided in @'CodecMK'@, thus, we + redefine parts of the internal @LMDB.Simple@ operations here. The + redefinitions are largely analogous to their counterparts, though they thread + through explicit CBOR encoders and decoders. +-} +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Bridge ( + -- * Internal: peek and poke + peekMDBVal + , pokeMDBVal + -- * Internal: marshalling + , deserialiseLBS + , marshalIn + , marshalInBS + , marshalOut + , serialiseBS + , serialiseLBS + -- * Cursor + , fromCodecMK + , runCursorAsTransaction' + -- * Internal: get and put + , delete + , deleteBS + , get + , getBS + , getBS' + , put + , putBS + ) where + +import Codec.CBOR.Decoding (Decoder) +import Codec.CBOR.Encoding (Encoding) +import Codec.CBOR.Read (deserialiseFromBytes) +import Codec.CBOR.Write (toLazyByteString) +import Control.Exception (assert) +import Control.Monad ((>=>)) +import Control.Monad.IO.Class (MonadIO (liftIO)) +import qualified Data.ByteString as BS +import qualified Data.ByteString.Lazy as LBS +import Database.LMDB.Raw (MDB_val (MDB_val), mdb_reserve') +import Database.LMDB.Simple (Database, Mode (ReadWrite), Transaction) +import Database.LMDB.Simple.Cursor (CursorM) +import qualified Database.LMDB.Simple.Cursor as Cursor +import qualified Database.LMDB.Simple.Internal as Internal +import Foreign (Ptr, Storable (peek, poke), castPtr) +import Ouroboros.Consensus.Ledger.Tables + +{------------------------------------------------------------------------------- + Internal: peek and poke +-------------------------------------------------------------------------------} + +peekMDBVal :: (forall s. Decoder s a) -> Ptr MDB_val -> IO a +peekMDBVal dec = peek >=> marshalIn dec + +pokeMDBVal :: (a -> Encoding) -> Ptr MDB_val -> a -> IO () +pokeMDBVal enc ptr x = marshalOut enc x (poke ptr) + +{------------------------------------------------------------------------------- + Internal: marshalling +-------------------------------------------------------------------------------} + +marshalIn :: + (forall s. Decoder s a) + -> MDB_val + -> IO a +marshalIn dec v = deserialiseLBS "" dec . 
LBS.fromStrict <$> marshalInBS v + +marshalInBS :: MDB_val -> IO BS.ByteString +marshalInBS (MDB_val len ptr) = BS.packCStringLen (castPtr ptr, fromIntegral len) + +-- | Deserialise an @'LBS.ByteString'@ using the provided decoder. +deserialiseLBS :: + String + -- ^ Label to be used for error reporting. This should describe the value to + -- be deserialised. + -> (forall s . Decoder s a) + -> LBS.ByteString + -> a +deserialiseLBS label decoder bs = either err snd $ deserialiseFromBytes decoder bs + where + err = error $ "deserialiseBS: error deserialising " ++ label ++ " from the database." + +marshalOut :: + (v -> Encoding) + -> v + -> (MDB_val -> IO t) + -> IO t +marshalOut enc = marshalOutBS . serialiseBS enc + +marshalOutBS :: BS.ByteString -> (MDB_val -> IO a) -> IO a +marshalOutBS = Internal.marshalOutBS + +serialiseBS :: (a -> Encoding) -> a -> BS.ByteString +serialiseBS enc = LBS.toStrict . serialiseLBS enc + +serialiseLBS :: (a -> Encoding) -> a -> LBS.ByteString +serialiseLBS enc = toLazyByteString . enc + +{------------------------------------------------------------------------------- + Cursor +-------------------------------------------------------------------------------} + +fromCodecMK :: CodecMK k v -> Cursor.PeekPoke k v +fromCodecMK (CodecMK encKey encVal decKey decVal) = Cursor.PeekPoke { + Cursor.kPeek = peekMDBVal decKey + , Cursor.vPeek = peekMDBVal decVal + , Cursor.kPoke = pokeMDBVal encKey + , Cursor.vPoke = pokeMDBVal encVal + } + +-- | Wrapper around @'Cursor.runCursorAsTransaction''@ that requires a +-- @'CodecMK'@ instead of a @'PeekPoke'@. +runCursorAsTransaction' :: + CursorM k v mode a + -> Database k v + -> CodecMK k v + -> Transaction mode a +runCursorAsTransaction' cm db codecMK = + Cursor.runCursorAsTransaction' cm db (fromCodecMK codecMK) + +{------------------------------------------------------------------------------- + Internal: get, put and delete +-------------------------------------------------------------------------------} + +get :: + CodecMK k v + -> Database k v + -> k + -> Transaction mode (Maybe v) +get (CodecMK encKey _ _ decVal) db = getBS decVal db . serialiseBS encKey + +getBS :: + (forall s. Decoder s v) + -> Database k v + -> BS.ByteString + -> Transaction mode (Maybe v) +getBS dec db k = getBS' db k >>= + maybe (return Nothing) (liftIO . fmap Just . marshalIn dec) + +getBS' :: Database k v -> BS.ByteString -> Transaction mode (Maybe MDB_val) +getBS' = Internal.getBS' + +put :: + CodecMK k v + -> Database k v + -> k + -> v + -> Transaction ReadWrite () +put codecMK@(CodecMK encKey _ _ _) db = putBS codecMK db . serialiseBS encKey + +putBS :: + CodecMK k v + -> Database k v + -> BS.ByteString + -> v + -> Transaction ReadWrite () +putBS (CodecMK _ encVal _ _) (Internal.Db _ dbi) keyBS value = Internal.Txn $ \txn -> + Internal.marshalOutBS keyBS $ \kval -> do + let valueLBS = serialiseLBS encVal value + sz = fromIntegral (LBS.length valueLBS) + MDB_val len ptr <- mdb_reserve' Internal.defaultWriteFlags txn dbi kval sz + let len' = fromIntegral len + assert (len' == sz) $ Internal.copyLazyBS valueLBS (castPtr ptr) len' + +delete :: + CodecMK k v + -> Database k v + -> k + -> Transaction ReadWrite Bool +delete (CodecMK encKey _ _ _) db = deleteBS db . 
serialiseBS encKey + +deleteBS :: Database k v -> BS.ByteString -> Transaction ReadWrite Bool +deleteBS = Internal.deleteBS diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Status.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Status.hs new file mode 100644 index 0000000000..3ce24850bf --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/BackingStore/Impl/LMDB/Status.hs @@ -0,0 +1,107 @@ +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE TupleSections #-} + +-- | LMDB resource status with read-append-write locking +module Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB.Status ( + -- * Status + Status (..) + , StatusLock + -- * Locks + , new + , withReadAccess + , withReadAccess' + , withWriteAccess + , withWriteAccess' + ) where + +import Control.Exception (Exception) +import Control.RAWLock (RAWLock) +import qualified Control.RAWLock as RAW +import Data.Functor ((<&>)) +import GHC.Generics (Generic) +import NoThunks.Class (NoThunks) +import Ouroboros.Consensus.Util.IOLike (IOLike, MonadThrow (throwIO)) + +{------------------------------------------------------------------------------- + Status +-------------------------------------------------------------------------------} + +-- | A 'RAWLock' for 'Status'. +newtype StatusLock m = StatusLock { getStatusLock :: RAWLock m Status } + +-- | Whether a resource is open or closed. +-- +-- Resources that we keep track of are: (i) the full LMDB backing store, and +-- (ii) each of the LMDB backing store value handles. +data Status = Open | Closed + deriving stock (Show, Eq, Generic) + deriving anyclass (NoThunks) + +{------------------------------------------------------------------------------- + Locks +-------------------------------------------------------------------------------} + +-- | Create a new 'StatusLock'. +new :: IOLike m => Status -> m (StatusLock m) +new st = StatusLock <$> RAW.new st + +-- | A variant of 'RAW.withWriteAccess' that throws an exception if @'Status' == +-- 'Closed'@. +-- +-- Note: contrary to 'RAW.withWriteAccess', the action to perform with the +-- acquired lock is not of type @'Status' -> ('Status', a)@. The 'Status' is +-- known to be 'Open', or an exception would have been thrown. +withWriteAccess :: + (IOLike m, Exception e) + => StatusLock m + -> e -- ^ The exception to throw + -> m (a, Status) -- ^ Action to perform, possibly updating the 'Status' + -> m a +withWriteAccess lock exc k = + RAW.withWriteAccess (getStatusLock lock) $ \case + Open -> k + Closed -> throwIO exc + +-- | Like 'withWriteAccess', but run an action when the status is 'Closed'. +withWriteAccess' :: + IOLike m + => StatusLock m + -> m a + -> m (a, Status) + -> m a +withWriteAccess' lock def k = + RAW.withWriteAccess (getStatusLock lock) $ \case + Open -> k + Closed -> def <&> (,Closed) + +-- | A variant of 'RAW.withReadAccess' that throws an exception if @'Status' == +-- 'Closed'@. +-- +-- Note: contrary to 'RAW.withReadAccess', the action to perform with the +-- acquired lock is not of type @'Status' -> a@. The 'Status' is known to be +-- 'Open', or an exception would have been thrown. 
+withReadAccess :: + (IOLike m, Exception e) + => StatusLock m + -> e -- ^ The exception to throw + -> m a -- ^ Action to perform + -> m a +withReadAccess lock exc k = + RAW.withReadAccess (getStatusLock lock) $ \case + Open -> k + Closed -> throwIO exc + +-- | Like 'withReadAccess', but run an action when the status is 'Closed'. +withReadAccess' :: + IOLike m + => StatusLock m + -> m a + -> m a + -> m a +withReadAccess' lock def k = + RAW.withReadAccess (getStatusLock lock) $ \case + Open -> k + Closed -> def diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Common.hs new file mode 100644 index 0000000000..dbe037bc0b --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Common.hs @@ -0,0 +1,256 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +#if __GLASGOW_HASKELL__ <= 906 +{-# LANGUAGE GADTs #-} +#endif +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE UndecidableInstances #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Common ( + -- * LedgerDB internal state + LedgerDBEnv (..) + , LedgerDBHandle (..) + , LedgerDBState (..) + , getEnv + , getEnv1 + , getEnv2 + , getEnv5 + , getEnvSTM + , getEnvSTM1 + -- * Forkers + , ForkerEnv (..) 
+ , getForkerEnv + , getForkerEnv1 + , getForkerEnvSTM + ) where + +import Control.Arrow +import Control.Tracer +import Data.Kind +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Set (Set) +import Data.Word +import GHC.Generics (Generic) +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Storage.LedgerDB.API as API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog +import Ouroboros.Consensus.Storage.LedgerDB.V1.Lock +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike + +{------------------------------------------------------------------------------- + LedgerDB internal state +-------------------------------------------------------------------------------} + +newtype LedgerDBHandle m l blk = LDBHandle (StrictTVar m (LedgerDBState m l blk)) + deriving Generic + +data LedgerDBState m l blk = + LedgerDBOpen !(LedgerDBEnv m l blk) + | LedgerDBClosed + deriving Generic + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + , NoThunks (LedgerCfg l) + ) => NoThunks (LedgerDBState m l blk) + +type LedgerDBEnv :: (Type -> Type) -> LedgerStateKind -> Type -> Type +data LedgerDBEnv m l blk = LedgerDBEnv { + -- | INVARIANT: the tip of the 'LedgerDB' is always in sync with the tip of + -- the current chain of the ChainDB. + ldbChangelog :: !(StrictTVar m (DbChangelog l)) + -- | Handle to the ledger's backing store, containing the parts that grow too + -- big for in-memory residency + , ldbBackingStore :: !(LedgerBackingStore m l) + -- | The flush lock to the 'BackingStore'. This lock is crucial when it + -- comes to keeping the data in memory consistent with the data on-disk. + -- + -- This lock should be held whenever we want to keep a consistent view of + -- the backing store for some time. In particular we use this: + -- + -- - when performing a query on the ledger state, we need to hold a + -- 'LocalStateQueryView' which, while live, must maintain a consistent view + -- of the DB, and therefore we acquire a Read lock. + -- + -- - when taking a snapshot of the ledger db, we need to prevent others + -- from altering the backing store at the same time, thus we acquire a + -- Write lock. + , ldbLock :: !(AllowThunk (LedgerDBLock m)) + -- | INVARIANT: this set contains only points that are in the + -- VolatileDB. + -- + -- INVARIANT: all points on the current chain fragment are in this set. + -- + -- The VolatileDB might contain invalid blocks, these will not be in + -- this set. + -- + -- When a garbage-collection is performed on the VolatileDB, the points + -- of the blocks eligible for garbage-collection should be removed from + -- this set. + , ldbPrevApplied :: !(StrictTVar m (Set (RealPoint blk))) + -- | Open forkers. + -- + -- INVARIANT: a forker is open iff its 'ForkerKey' is in this 'Map. 
+ , ldbForkers :: !(StrictTVar m (Map ForkerKey (ForkerEnv m l blk))) + , ldbNextForkerKey :: !(StrictTVar m ForkerKey) + + , ldbSnapshotPolicy :: !SnapshotPolicy + , ldbTracer :: !(Tracer m (TraceLedgerDBEvent blk)) + , ldbCfg :: !(LedgerDbCfg l) + , ldbHasFS :: !(SnapshotsFS m) + , ldbShouldFlush :: !(Word64 -> Bool) + , ldbQueryBatchSize :: !QueryBatchSize + , ldbResolveBlock :: !(ResolveBlock m blk) + } deriving (Generic) + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + , NoThunks (LedgerCfg l) + ) => NoThunks (LedgerDBEnv m l blk) + +-- | Check if the LedgerDB is open; if so, execute the given function on the +-- 'LedgerDBEnv', otherwise throw a 'ClosedDBError'. +getEnv :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> m r) + -> m r +getEnv (LDBHandle varState) f = readTVarIO varState >>= \case + LedgerDBOpen env -> f env + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + +-- | Variant of 'getEnv' for functions taking one argument. +getEnv1 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> m r) + -> a -> m r +getEnv1 h f a = getEnv h (`f` a) + +-- | Variant of 'getEnv' for functions taking two arguments. +getEnv2 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> b -> m r) + -> a -> b -> m r +getEnv2 h f a b = getEnv h (\env -> f env a b) + +-- | Variant of 'getEnv' for functions taking five arguments. +getEnv5 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> b -> c -> d -> e -> m r) + -> a -> b -> c -> d -> e -> m r +getEnv5 h f a b c d e = getEnv h (\env -> f env a b c d e) + +-- | Variant of 'getEnv' that works in 'STM'. +getEnvSTM :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> STM m r) + -> STM m r +getEnvSTM (LDBHandle varState) f = readTVar varState >>= \case + LedgerDBOpen env -> f env + LedgerDBClosed -> throwSTM $ ClosedDBError @blk prettyCallStack + +-- | Variant of 'getEnv1' that works in 'STM'. +getEnvSTM1 :: + forall m l blk a r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> STM m r) + -> a -> STM m r +getEnvSTM1 (LDBHandle varState) f a = readTVar varState >>= \case + LedgerDBOpen env -> f env a + LedgerDBClosed -> throwSTM $ ClosedDBError @blk prettyCallStack + +{------------------------------------------------------------------------------- + Forkers +-------------------------------------------------------------------------------} + +data ForkerEnv m l blk = ForkerEnv { + -- | Local, consistent view of backing store + foeBackingStoreValueHandle :: !(LedgerBackingStoreValueHandle m l) + -- | In memory db changelog + , foeChangelog :: !(StrictTVar m (AnchorlessDbChangelog l)) + -- | Points to 'ldbChangelog'. + , foeSwitchVar :: !(StrictTVar m (DbChangelog l)) + -- | Config + , foeSecurityParam :: !SecurityParam + -- | Config + , foeQueryBatchSize :: !QueryBatchSize + -- | Tracer + , foeTracer :: !(Tracer m TraceForkerEvent) + } + deriving Generic + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + ) => NoThunks (ForkerEnv m l blk) + +getForkerEnv :: + forall m l blk r.
(IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> m r) + -> m r +getForkerEnv (LDBHandle varState) forkerKey f = do + forkerEnv <- atomically $ readTVar varState >>= \case + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + LedgerDBOpen env -> readTVar (ldbForkers env) >>= (Map.lookup forkerKey >>> \case + Nothing -> throwSTM $ ClosedForkerError @blk forkerKey prettyCallStack + Just forkerEnv -> pure forkerEnv) + + f forkerEnv + +getForkerEnv1 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> a -> m r) + -> a -> m r +getForkerEnv1 h forkerKey f a = getForkerEnv h forkerKey (`f` a) + +getForkerEnvSTM :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> STM m r) + -> STM m r +getForkerEnvSTM (LDBHandle varState) forkerKey f = readTVar varState >>= \case + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + LedgerDBOpen env -> readTVar (ldbForkers env) >>= (Map.lookup forkerKey >>> \case + Nothing -> throwSTM $ ClosedForkerError @blk forkerKey prettyCallStack + Just forkerEnv -> f forkerEnv) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/DbChangelog.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/DbChangelog.hs new file mode 100644 index 0000000000..ad3015095f --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/DbChangelog.hs @@ -0,0 +1,1017 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +-- | A 'DbChangelog' is the component of the +-- 'Ouroboros.Consensus.Storage.LedgerDB.LedgerDB' implementation that is +-- responsible for: +-- +-- - Maintaining the last \(k\) in-memory ledger states without on-disk parts. +-- +-- - Holding the in-memory ledger state that a snapshot would write to the disk. +-- +-- - Providing sequences of differences from said state to any requested state +-- in the last \(k\) ledger states, which combined with the values in the +-- 'BackingStore', can provide 'LedgerTable's at any of those ledger states. +-- +-- A 'DbChangelog' is said to be /anchored/ #anchored# at a 'BackingStore' when +-- the slot of the values in the backing store is the predecessor of the slots in +-- the sequence of differences, with the overall sequence of slots being defined +-- by the blocks on the chain. +-- +-- This design is based on the technical report "Storing the Cardano ledger +-- state on disk: API design concepts" by Duncan Coutts and Douglas Wilson. +-- +-- = Implementation details +-- +-- The 'DbChangelog' is in fact a pure data structure, of which the 'LedgerDB' +-- will carry a value in some mutable state, see +-- 'Ouroboros.Consensus.Storage.LedgerDB.LedgerDBState'.
+--
+-- == Carrying states
+--
+-- The 'DbChangelog' contains an instantiation of the 'AnchoredSeq' data type to
+-- hold the last \(k\) in-memory ledger states. This data type is implemented
+-- using the /finger tree/ data structure and has the following time
+-- complexities:
+--
+-- - Appending a new ledger state to the end in constant time.
+--
+-- - Rolling back to a previous ledger state in logarithmic time.
+--
+-- - Looking up a past ledger state by its point in logarithmic time.
+--
+-- One can think of 'AnchoredSeq' as a 'Seq' from "Data.Sequence" with a custom
+-- /finger tree measure/ allowing for efficient lookups by point, combined with
+-- an /anchor/. When fully /saturated/, the sequence will contain \(k\) ledger
+-- states. In case of a complete rollback of all \(k\) blocks and thus ledger
+-- states, the sequence will become empty. A ledger state is still needed, i.e.,
+-- one corresponding to the most recent immutable block that cannot be rolled
+-- back. The ledger state at the anchor plays this role.
+--
+-- == Appending in-memory states
+--
+-- When a new ledger state is appended to a fully saturated 'DbChangelog' (i.e.
+-- one that contains \(k\) states), the ledger state at the anchor is dropped and
+-- the oldest element in the sequence becomes the new anchor, as it has become
+-- immutable. This maintains the invariant that only the last \(k\) in-memory
+-- ledger states are stored, /excluding/ the ledger state at the anchor. This
+-- means that in practice, \(k + 1\) ledger states will be kept in memory. When
+-- the 'DbChangelog' contains fewer than \(k\) elements, new ones are appended
+-- without shifting the anchor until it is saturated.
+--
+-- == Getting and appending differences
+--
+-- For the differences, the 'DbChangelog' contains a 'SeqDiffMK' (see
+-- "Ouroboros.Consensus.Ledger.Tables.DiffSeq") which in turn is just an
+-- instantiation of a /root-measured finger tree/ (see
+-- [fingertree-rm](https://github.com/input-output-hk/anti-diffs/tree/main/fingertree-rm))
+-- which is a specialization of finger trees that carries a root-measure,
+-- which is the monoidal sum of all the measures of all the elements.
+--
+-- This allows us to very efficiently look up the combined difference of the
+-- whole 'DbChangelog', while still having a good complexity when splitting this
+-- tree.
+--
+-- When a block is to be applied to a ledger state (which must be in the
+-- 'DbChangelog' or application would directly fail), applying the root-measure
+-- of the sub-sequence of differences from the backing store slot up to the
+-- requested slot to the values read from the backing store will provide the
+-- 'LedgerTable's needed for applying the block.
+--
+-- Once a new ledger state is appended to the 'DbChangelog', said ledger state
+-- will carry 'DiffMK' tables (obtained by diffing the input and output ledger
+-- tables when calling the Ledger rules). Adding those differences to the
+-- 'DbChangelog' is just a matter of extending the carried 'SeqDiffMK'.
+--
+-- Only when flushing is the 'SeqDiffMK' pruned, by extracting the differences
+-- between the last flushed state and the current immutable tip.
+module Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog (
+ -- * The DbChangelog
+ DbChangelog (..)
+ , DbChangelog'
+ -- ** Views
+ , AnchorlessDbChangelog (..)
+ , AnchorlessDbChangelog' + , StatesSequence + -- * Construction + , empty + , pruneToImmTipOnly + -- * Mapping changelogs + -- + -- | These functions are analogous to 'fmap' for modifying the inner + -- 'AnchorlessDbChangelog'. + , onChangelog + , onChangelogM + -- * Updating a @DbChangelog@ + -- ** Applying blocks #applying# + -- + -- | Applying blocks to the 'DbChangelog' will extend it if the result is + -- successful. + -- + -- In order to do so, we first need to [find the particular + -- block](#g:findingBlocks), then prepare the ledger tables by [hydrating + -- the ledger state](#g:hydratingTheLedgerState) and then finally call the + -- ledger, which might throw errors. + , reapplyThenPush + -- *** Hydrating the ledger state #hydratingTheLedgerState# + -- + -- | When trying to get tables at a specific ledger state, we must follow a + -- process we call /hydrating the ledger state/. This process consists of 3 steps: + -- + -- 1. Rewind the requested keys to the beginning of the DbChangelog. For + -- UTxO entries this just means that we record at which slot the db + -- changelog was when rewinding. + -- + -- 2. Query the 'BackingStore' for the actual values for the requested keys. + -- + -- 3. Forward those values by applying the differences in the 'DbChangelog' up to + -- the requested point. + , withKeysReadSets + -- **** Rewind + , RewoundTableKeySets (..) + , rewindTableKeySets + -- **** Read + , KeySetsReader + , UnforwardedReadSets (..) + , getLedgerTablesFor + , readKeySets + , readKeySetsWith + , trivialKeySetsReader + -- **** Forward + , RewindReadFwdError (..) + , forwardTableKeySets + , forwardTableKeySets' + -- ** Flushing + , DiffsToFlush (..) + , splitForFlushing + -- * Queries + , anchor + , current + , flushableLength + , getPastLedgerAt + , rollback + , snapshots + , tip + , volatileStatesBimap + -- * 🧪 Testing + -- ** Internal + , extend + , immutableTipSlot + , isSaturated + , maxRollback + , prune + , rollbackN + , rollbackToAnchor + , rollbackToPoint + -- * Testing + , reapplyThenPush' + , reapplyThenPushMany' + , switch + , switch' + ) where + +import Cardano.Slotting.Slot +import Control.Exception as Exn +import Data.Bifunctor (bimap) +import Data.Functor.Identity +import Data.Map.Diff.Strict as AntiDiff (applyDiffForKeys) +import Data.Monoid (Sum (..)) +import Data.SOP (K, unK) +import Data.SOP.Functors +import Data.Word +import GHC.Generics (Generic) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables.Diff (fromAntiDiff, + toAntiDiff) +import qualified Ouroboros.Consensus.Ledger.Tables.DiffSeq as DS +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API +import Ouroboros.Consensus.Util (repeatedlyM) +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.AnchoredSeq (AnchoredSeq) +import qualified Ouroboros.Network.AnchoredSeq as AS + +{------------------------------------------------------------------------------- + The DbChangelog +-------------------------------------------------------------------------------} + +-- | Holds a sequence of split ledger states, where the in-memory part is in a +-- sequence and the on-disk part is represented by a sequence of differences +-- that need a 'BackingStore' as an anchor point. 
+-- +-- We illustrate its contents below, where @k = 3@ (for a state @Li@, the +-- corresponding set of differences is @Di@): +-- +-- +----------------+------------------------------------+------------------------------------------+ +-- | lastFlushed | states | tableDiffs | +-- +================+====================================+==========================================+ +-- | @L0@ | @L0 :> [ ] @ | @[ ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L0@ | @L0 :> [ L1 ] @ | @[ D1 ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L0@ | @L0 :> [ L1, L2 ] @ | @[ D1, D2 ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L0@ | @L0 :> [ L1, L2, L3 ] @ | @[ D1, D2, D3 ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L0@ | @L1 :> [ L2, L3, L4 ] @ | @[ D1, D2, D3, D4 ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L0@ | @L2 :> [ L3, L4, L5 ] @ | @[ D1, D2, D3, D4, D5 ] -- (*) @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L2@ | @L2 :> [ L3, L4, L5 ] @ | @[ D3, D4, D5 ] -- flush (**)@ | +-- +----------------+------------------------------------+------------------------------------------+ +-- | @L2@ | @L3 :> [ L4, L5, L6 ]@ | @[ D3, D4, D5, D6 ] @ | +-- +----------------+------------------------------------+------------------------------------------+ +-- +-- Notice that @length states@ is usually @k@ except when rollbacks or data +-- corruption take place and will be less than @k@ when we just loaded a +-- snapshot. We cannot roll back more than @k@ blocks. This means that after a +-- rollback of @k@ blocks at @(*)@, the changelog will look something like this: +-- +-- +------+-------------+--------------+ +-- | @L0@ | @L2 :> [ ]@ | @[ D1, D2 ]@ | +-- +------+-------------+--------------+ +-- +-- And a rollback of @k@ blocks at @(**)@ will look something like this: +-- +-- +------+-------------+-------+ +-- | @L2@ | @L2 :> [ ]@ | @[ ]@ | +-- +------+-------------+-------+ +-- +-- Notice how the states list always contains the in-memory state of the anchor, +-- but the table differences might not contain the differences for that anchor +-- if they have been flushed to the backend. +-- +-- As said above, this @DbChangelog@ has to be coupled with a @BackingStore@ +-- which provides the pointers to the on-disk data. +data DbChangelog l = DbChangelog { + -- | The last flushed ledger state. + -- + -- We need to keep track of this one as this will be the state written to + -- disk when we make a snapshot + changelogLastFlushedState :: !(l EmptyMK) + + -- | The in memory part of the DbChangelog. Most of the operations we do + -- with the @DbChangelog@ happen with the in-memory data only. + , anchorlessChangelog :: !(AnchorlessDbChangelog l) + } + deriving (Generic) + +deriving instance (Eq (Key l), Eq (Value l), Eq (l EmptyMK)) + => Eq (DbChangelog l) +deriving instance (NoThunks (Key l), NoThunks (Value l), NoThunks (l EmptyMK)) + => NoThunks (DbChangelog l) +deriving instance (Show (Key l), Show (Value l), Show (l EmptyMK)) + => Show (DbChangelog l) + +-- | A 'DbChangelog' variant that contains only the information in memory. 
To +-- perform reads of Ledger Tables, this needs to be coupled with a +-- 'BackingStoreValueHandle' as done in +-- 'Ouroboros.Consensus.LedgerDB.API.LedgerDBView'. +data AnchorlessDbChangelog l = AnchorlessDbChangelog { + -- | Slot of the last flushed changelog state from which this variant + -- originated. Used just for asserting correctness when forwarding. + adcLastFlushedSlot :: !(WithOrigin SlotNo) + -- | The sequence of differences between the last flushed state + -- ('changelogLastFlushedState') and the tip of the volatile sequence + -- ('adcStates'). + , adcDiffs :: !(LedgerTables l SeqDiffMK) + -- | The volatile sequence of states. + -- + -- The anchor of this sequence is the immutable tip, so whenever we flush, + -- we should do so up until that point. The length of this sequence will be + -- @k@ except in abnormal circumstances like rollbacks or data corruption. + , adcStates :: !(StatesSequence l) + } deriving (Generic) + +deriving instance (Eq (LedgerTables l SeqDiffMK), Eq (l EmptyMK)) + => Eq (AnchorlessDbChangelog l) +deriving instance (NoThunks (LedgerTables l SeqDiffMK), NoThunks (l EmptyMK)) + => NoThunks (AnchorlessDbChangelog l) +deriving instance (Show (LedgerTables l SeqDiffMK), Show (l EmptyMK)) + => Show (AnchorlessDbChangelog l) + +type StatesSequence l = AnchoredSeq + (WithOrigin SlotNo) + (l EmptyMK) + (l EmptyMK) + +type AnchorlessDbChangelog' blk = AnchorlessDbChangelog (ExtLedgerState blk) + +instance GetTip l => AS.Anchorable (WithOrigin SlotNo) (l EmptyMK) (l EmptyMK) where + asAnchor = id + getAnchorMeasure _ = getTipSlot + +instance IsLedger l => GetTip (K (DbChangelog l)) where + getTip = castPoint + . getTip + . either id id + . AS.head + . adcStates + . anchorlessChangelog + . unK + +instance IsLedger l => GetTip (K (AnchorlessDbChangelog l)) where + getTip = castPoint + . getTip + . either id id + . AS.head + . adcStates + . unK + +type instance HeaderHash (K @MapKind (DbChangelog l)) = + HeaderHash l + +type instance HeaderHash (K @MapKind (AnchorlessDbChangelog l)) = + HeaderHash l + +type DbChangelog' blk = DbChangelog (ExtLedgerState blk) + +{------------------------------------------------------------------------------- + Construction +-------------------------------------------------------------------------------} + +-- | Creates an empty @DbChangelog@. +empty :: + (HasLedgerTables l, GetTip l) + => l EmptyMK -> DbChangelog l +empty theAnchor = + DbChangelog { + changelogLastFlushedState = theAnchor + , anchorlessChangelog = AnchorlessDbChangelog { + adcLastFlushedSlot = pointSlot $ getTip theAnchor + , adcDiffs = ltpure (SeqDiffMK DS.empty) + , adcStates = AS.Empty theAnchor + } + } + +{------------------------------------------------------------------------------- + Mapping changelogs +-------------------------------------------------------------------------------} + +onChangelog :: (AnchorlessDbChangelog l -> AnchorlessDbChangelog l) + -> DbChangelog l + -> DbChangelog l +onChangelog f dbch = runIdentity $ onChangelogM (Identity . f) dbch + +onChangelogM :: Monad m + => (AnchorlessDbChangelog l -> m (AnchorlessDbChangelog l)) + -> DbChangelog l + -> m (DbChangelog l) +onChangelogM f dbch = do + anchorlessChangelog' <- f $ anchorlessChangelog dbch + pure dbch { anchorlessChangelog = anchorlessChangelog' } + +reapplyBlock :: forall m l blk. 
(ApplyBlock l blk, Monad m)
+ => LedgerCfg l
+ -> blk
+ -> KeySetsReader m l
+ -> AnchorlessDbChangelog l
+ -> m (l DiffMK)
+reapplyBlock cfg b ksReader db =
+ withKeysReadSets (current db) ksReader db (getBlockKeySets b) (return . tickThenReapply cfg b)
+
+-- | If applying a block on top of the ledger state at the tip is successful,
+-- extend the DbChangelog with the resulting ledger state.
+--
+-- Note that we require @c@ (from the particular choice of @Ap m l blk c@) so
+-- this can sometimes throw ledger errors.
+reapplyThenPush :: (Monad m, ApplyBlock l blk)
+ => LedgerDbCfg l
+ -> blk
+ -> KeySetsReader m l
+ -> AnchorlessDbChangelog l
+ -> m (AnchorlessDbChangelog l)
+reapplyThenPush cfg ap ksReader db =
+ (\current' -> prune (ledgerDbCfgSecParam cfg) $ extend current' db) <$>
+ reapplyBlock (ledgerDbCfg cfg) ap ksReader db
+
+-- | Prune ledger states from the front until we have at most @k@ in the
+-- DbChangelog, excluding the one stored at the anchor.
+--
+-- +--------------+----------------------------+----------------------+
+-- | lastFlushed | states | tableDiffs |
+-- +==============+============================+======================+
+-- | @L0@ | @L0 :> [ L1, L2, L3, L4 ]@ | @[ D1, D2, D3, D4 ]@ |
+-- +--------------+----------------------------+----------------------+
+-- | @>> prune (SecurityParam 3)@ |
+-- +--------------+----------------------------+----------------------+
+-- | @L0@ | @L2 :> [ L3, L4 ]@ | @[ D1, D2, D3, D4 ]@ |
+-- +--------------+----------------------------+----------------------+
+prune :: GetTip l
+ => SecurityParam
+ -> AnchorlessDbChangelog l
+ -> AnchorlessDbChangelog l
+prune (SecurityParam k) dblog =
+ dblog { adcStates = vol' }
+ where
+ AnchorlessDbChangelog { adcStates } = dblog
+
+ nvol = AS.length adcStates
+
+ vol' =
+ if toEnum nvol <= k
+ then adcStates
+ else snd $ AS.splitAt (nvol - fromEnum k) adcStates
+
+-- NOTE: we must inline 'prune' otherwise we get unexplained thunks in
+-- 'DbChangelog' and thus a space leak. Alternatively, we could disable the
+-- @-fstrictness@ optimisation (enabled by default for -O1). See #2532.
+-- NOTE (@js): this INLINE was inherited from before UTxO-HD, so maybe it is not
+-- needed anymore.
+{-# INLINE prune #-}
+
+-- | Extending the DbChangelog with a valid ledger state.
+--
+-- +------+----------------------------+----------------------+
+-- | @L2@ | @L2 :> [ L3, L4, L5 ]@ | @[ D3, D4, D5 ]@ |
+-- +------+----------------------------+----------------------+
+-- | @>> extend L6 (D6)@ |
+-- +------+----------------------------+----------------------+
+-- | @L2@ | @L2 :> [ L3, L4, L5, L6 ]@ | @[ D3, D4, D5, D6 ]@ |
+-- +------+----------------------------+----------------------+
+extend :: (GetTip l, HasLedgerTables l)
+ => l DiffMK
+ -> AnchorlessDbChangelog l
+ -> AnchorlessDbChangelog l
+extend newState dblog =
+ AnchorlessDbChangelog {
+ adcLastFlushedSlot = adcLastFlushedSlot
+ , adcDiffs = ltliftA2 ext adcDiffs tablesDiff
+ , adcStates = adcStates AS.:> l'
+ }
+ where
+ slot = case getTipSlot l' of
+ Origin -> error "impossible! 
extending a DbChangelog with a state at Origin" + At s -> s + + ext :: + (Ord k, Eq v) + => SeqDiffMK k v + -> DiffMK k v + -> SeqDiffMK k v + ext (SeqDiffMK sq) (DiffMK d) = + SeqDiffMK $ DS.extend sq slot $ toAntiDiff d + + l' = forgetLedgerTables newState + tablesDiff = projectLedgerTables newState + + AnchorlessDbChangelog { + adcLastFlushedSlot + , adcDiffs + , adcStates + } = dblog + +{------------------------------------------------------------------------------- + Rewind +-------------------------------------------------------------------------------} + +data RewoundTableKeySets l = + RewoundTableKeySets + !(WithOrigin SlotNo) -- ^ the slot to which the keys were rewound + !(LedgerTables l KeysMK) + +rewindTableKeySets :: AnchorlessDbChangelog l + -> LedgerTables l KeysMK + -> RewoundTableKeySets l +rewindTableKeySets = RewoundTableKeySets . adcLastFlushedSlot + +{------------------------------------------------------------------------------- + Read +-------------------------------------------------------------------------------} + +type KeySetsReader m l = RewoundTableKeySets l -> m (UnforwardedReadSets l) + +readKeySets :: + IOLike m + => LedgerBackingStore m l + -> KeySetsReader m l +readKeySets backingStore rew = do + withBsValueHandle backingStore (`readKeySetsWith` rew) + +readKeySetsWith :: + Monad m + => LedgerBackingStoreValueHandle m l + -> RewoundTableKeySets l + -> m (UnforwardedReadSets l) +readKeySetsWith bsvh (RewoundTableKeySets _seqNo rew) = do + values <- bsvhRead bsvh rew + pure UnforwardedReadSets { + ursSeqNo = bsvhAtSlot bsvh + , ursValues = values + , ursKeys = rew + } + +withKeysReadSets :: + (HasLedgerTables l, Monad m) + => l mk1 + -> KeySetsReader m l + -> AnchorlessDbChangelog l + -> LedgerTables l KeysMK + -> (l ValuesMK -> m a) + -> m a +withKeysReadSets st ksReader dbch ks f = do + let aks = rewindTableKeySets dbch ks + urs <- ksReader aks + case withHydratedLedgerState urs of + Left err -> + -- We performed the rewind;read;forward sequence in this function. So + -- the forward operation should not fail. If this is the case we're in + -- the presence of a problem that we cannot deal with at this level, + -- so we throw an error. + -- + -- When we introduce pipelining, if the forward operation fails it + -- could be because the DB handle was modified by a DB flush that took + -- place when __after__ we read the unforwarded keys-set from disk. + -- However, performing rewind;read;forward with the same __locked__ + -- changelog should always succeed. + error $ "Changelog rewind;read;forward sequence failed, " <> show err + Right res -> res + + where + withHydratedLedgerState urs = + f + . withLedgerTables st + <$> forwardTableKeySets dbch urs + +-- | The requested point is not found on the ledger db +newtype PointNotFound blk = PointNotFound (Point blk) deriving (Eq, Show) + +-- | Read and forward the values up to the tip of the given ledger db. Returns +-- Left if the anchor moved. If Left is returned, then the caller was just +-- unlucky and scheduling of events happened to move the backing store. Reading +-- again the LedgerDB and calling this function must eventually succeed. 
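+--
+-- As an illustrative sketch only (@retryRead@ and @getChangelog@ are
+-- hypothetical names; @getChangelog@ stands for whatever action re-reads the
+-- current changelog), a caller could therefore retry the rewind;read;forward
+-- sequence until it succeeds:
+--
+-- @
+-- retryRead getChangelog bstore keys = do
+--   db  <- getChangelog
+--   res <- getLedgerTablesFor db keys (readKeySets bstore)
+--   case res of
+--     Right values -> pure values
+--     Left _       -> retryRead getChangelog bstore keys
+-- @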
+getLedgerTablesFor :: + (Monad m, HasLedgerTables l) + => AnchorlessDbChangelog l + -> LedgerTables l KeysMK + -> KeySetsReader m l + -> m (Either RewindReadFwdError (LedgerTables l ValuesMK)) +getLedgerTablesFor db keys ksRead = do + let aks = rewindTableKeySets db keys + urs <- ksRead aks + pure $ forwardTableKeySets db urs + +trivialKeySetsReader :: (Monad m, LedgerTablesAreTrivial l) => KeySetsReader m l +trivialKeySetsReader (RewoundTableKeySets s _) = + pure $ UnforwardedReadSets s trivialLedgerTables trivialLedgerTables + +{------------------------------------------------------------------------------- + Forward +-------------------------------------------------------------------------------} + +data UnforwardedReadSets l = UnforwardedReadSets { + -- | The Slot number of the anchor of the 'DbChangelog' that was used when + -- rewinding and reading. + ursSeqNo :: !(WithOrigin SlotNo) + -- | The values that were found in the 'BackingStore'. + , ursValues :: !(LedgerTables l ValuesMK) + -- | All the requested keys, being or not present in the 'BackingStore'. + , ursKeys :: !(LedgerTables l KeysMK) + } + +-- | The DbChangelog and the BackingStore got out of sync. This is a critical +-- error, we cannot recover from this. +data RewindReadFwdError = RewindReadFwdError { + rrfBackingStoreAt :: !(WithOrigin SlotNo) + , rrfDbChangelogAt :: !(WithOrigin SlotNo) + } deriving Show + +forwardTableKeySets' :: + HasLedgerTables l + => WithOrigin SlotNo + -> LedgerTables l SeqDiffMK + -> UnforwardedReadSets l + -> Either RewindReadFwdError + (LedgerTables l ValuesMK) +forwardTableKeySets' seqNo chdiffs = \(UnforwardedReadSets seqNo' values keys) -> + if seqNo /= seqNo' + then Left $ RewindReadFwdError seqNo' seqNo + else Right $ ltliftA3 forward values keys chdiffs + where + forward :: + (Ord k, Eq v) + => ValuesMK k v + -> KeysMK k v + -> SeqDiffMK k v + -> ValuesMK k v + forward (ValuesMK values) (KeysMK keys) (SeqDiffMK diffs) = + ValuesMK $ AntiDiff.applyDiffForKeys values keys (DS.cumulativeDiff diffs) + +forwardTableKeySets :: + HasLedgerTables l + => AnchorlessDbChangelog l + -> UnforwardedReadSets l + -> Either RewindReadFwdError + (LedgerTables l ValuesMK) +forwardTableKeySets dblog = + forwardTableKeySets' + (adcLastFlushedSlot dblog) + (adcDiffs dblog) + +{------------------------------------------------------------------------------- + Reset +-------------------------------------------------------------------------------} + +-- | When creating a new @DbChangelog@, we should load whichever snapshot we +-- find and then replay the chain up to the immutable tip. When we get there, +-- the @DbChangelog@ will have a @k@-long sequence of states, which all come +-- from immutable blocks, so we just prune all of them and only keep the last +-- one as an anchor, as it is the immutable tip. Then we can proceed with +-- opening the VolatileDB. +-- +-- If we didn't do this step, the @DbChangelog@ would accept rollbacks into the +-- immutable part of the chain, which must never be possible. 
+-- +-- +--------------+----------------------------+----------------------+ +-- | lastFlushed | states | tableDiffs | +-- +==============+============================+======================+ +-- | @L0@ | @L0 :> [ L1, L2, L3, L4 ]@ | @[ D1, D2, D3, D4 ]@ | +-- +--------------+----------------------------+----------------------+ +-- | @>> pruneToImmTipOnly@ | +-- +--------------+----------------------------+----------------------+ +-- | @L0@ | @L4 :> [ ]@ | @[ D1, D2, D3, D4 ]@ | +-- +--------------+----------------------------+----------------------+ +pruneToImmTipOnly :: GetTip l + => AnchorlessDbChangelog l + -> AnchorlessDbChangelog l +pruneToImmTipOnly = prune (SecurityParam 0) + +{------------------------------------------------------------------------------- + Internal: rolling back +-------------------------------------------------------------------------------} + +-- | Rollback @n@ ledger states. +-- +-- Returns 'Nothing' if maximum rollback (usually @k@, but can be less on +-- startup or under corruption) is exceeded. +-- +-- +--------------+------------------------+--------------------------+ +-- | lastFlushed | states | tableDiffs | +-- +==============+========================+==========================+ +-- | @L2@ | @L3 :> [ L4, L5, L6 ]@ | @[ D2, D3, D4, D5, D6 ]@ | +-- +--------------+------------------------+--------------------------+ +-- | @>> rollback 3@ | +-- +--------------+------------------------+--------------------------+ +-- | @L2@ | @L3 :> [ ] @ | @[ D2, D3 ]@ | +-- +--------------+------------------------+--------------------------+ +rollbackN :: + (GetTip l, HasLedgerTables l) + => Word64 + -> AnchorlessDbChangelog l + -> Maybe (AnchorlessDbChangelog l) +rollbackN n dblog + | n <= maxRollback dblog + = Just $ dblog { + adcDiffs = ltmap truncSeqDiff adcDiffs + , adcStates = AS.dropNewest (fromIntegral n) adcStates + } + | otherwise + = Nothing + where + truncSeqDiff :: (Ord k, Eq v) => SeqDiffMK k v -> SeqDiffMK k v + truncSeqDiff (SeqDiffMK sq) = + SeqDiffMK $ fst $ DS.splitAtFromEnd (fromIntegral n) sq + + AnchorlessDbChangelog { + adcDiffs + , adcStates + } = dblog + +{------------------------------------------------------------------------------- + Flushing +-------------------------------------------------------------------------------} + +-- | " Flush " the 'DbChangelog' by splitting it into the diffs that should be +-- flushed and the new 'DbChangelog'. +-- +-- +--------------+------------------------+------------------------------------------+ +-- | lastFlushed | states | tableDiffs | +-- +==============+========================+==========================================+ +-- | @L2@ | @L3 :> [ L4, L5, L6 ]@ | @[ D2, D3, D4, D5, D6 ]@ | +-- +--------------+------------------------+------------------------------------------+ +-- | @>> splitForFlushing@ | +-- +--------------+------------------------+------------------------------------------+ +-- | @L2@ | -- | @[ D2, D3 ] -- this is a 'DiffsToFlush'@ | +-- +--------------+------------------------+------------------------------------------+ +-- | @L3@ | @L3 :> [ L4, L5, L6 ]@ | @[ D4, D5, D6 ]@ | +-- +--------------+------------------------+------------------------------------------+ +splitForFlushing :: + forall l. + (GetTip l, HasLedgerTables l) + => DbChangelog l + -> (Maybe (DiffsToFlush l), DbChangelog l) +splitForFlushing dblog = + if getTipSlot immTip == Origin || ltcollapse (ltmap (K2 . DS.length . 
getSeqDiffMK) l) == 0 + then (Nothing, dblog) + else (Just ldblog, rdblog) + where + DbChangelog { + changelogLastFlushedState + , anchorlessChangelog = AnchorlessDbChangelog { + adcDiffs + , adcStates + } + } = dblog + + immTip = AS.anchor adcStates + + splitSeqDiff :: + (Ord k, Eq v) + => SeqDiffMK k v + -> (SeqDiffMK k v, SeqDiffMK k v) + splitSeqDiff (SeqDiffMK sq) = + let numToFlush = DS.length sq - AS.length adcStates + in bimap (maybe emptyMK SeqDiffMK) SeqDiffMK + $ if numToFlush > 0 + then let (tf, tk) = DS.splitAt numToFlush sq + in (Just tf, tk) + else (Nothing, sq) + + lr = ltmap (uncurry Pair2 . splitSeqDiff) adcDiffs + l = ltmap (\(Pair2 x _) -> x) lr + r = ltmap (\(Pair2 _ y) -> y) lr + + (newTip, newStates) = + if ltcollapse $ ltmap (\(SeqDiffMK sq) -> K2 $ 0 == DS.length sq) l + then (changelogLastFlushedState, adcStates) + else (immTip, adcStates) + + prj :: + (Ord k, Eq v) + => SeqDiffMK k v + -> DiffMK k v + prj (SeqDiffMK sq) = DiffMK (fromAntiDiff $ DS.cumulativeDiff sq) + + ldblog = DiffsToFlush { + toFlushDiffs = ltmap prj l + , toFlushSlot = + fromWithOrigin (error "Flushing a DbChangelog at origin should never happen") + $ getTipSlot immTip + } + + rdblog = DbChangelog { + changelogLastFlushedState = newTip + , anchorlessChangelog = AnchorlessDbChangelog { + adcLastFlushedSlot = getTipSlot newTip + , adcDiffs = r + , adcStates = newStates + } + } + +{------------------------------------------------------------------------------- + Queries +-------------------------------------------------------------------------------} + +-- | The ledger state at the tip of the chain +current :: GetTip l => AnchorlessDbChangelog l -> l EmptyMK +current = + either id id + . AS.head + . adcStates + +-- | The ledger state at the anchor of the Volatile chain (i.e. the immutable +-- tip). +anchor :: AnchorlessDbChangelog l -> l EmptyMK +anchor = + AS.anchor + . adcStates + +-- | All snapshots currently stored by the ledger DB (new to old) +-- +-- This also includes the snapshot at the anchor. For each snapshot we also +-- return the distance from the tip. +snapshots :: AnchorlessDbChangelog l -> [(Word64, l EmptyMK)] +snapshots = + zip [0..] + . AS.toNewestFirst + . adcStates + +-- | How many blocks can we currently roll back? +maxRollback :: GetTip l => AnchorlessDbChangelog l -> Word64 +maxRollback = + fromIntegral + . AS.length + . adcStates + +-- | Reference to the block at the tip of the chain +tip :: GetTip l => AnchorlessDbChangelog l -> Point l +tip = castPoint . getTip . current + +-- | Have we seen at least @k@ blocks? +isSaturated :: GetTip l => SecurityParam -> AnchorlessDbChangelog l -> Bool +isSaturated (SecurityParam k) db = + maxRollback db >= k + +-- | Get a past ledger state +-- +-- \( O(\log(\min(i,n-i)) \) +-- +-- When no ledger state (or anchor) has the given 'Point', 'Nothing' is +-- returned. +getPastLedgerAt :: + ( HasHeader blk, IsLedger l, HeaderHash l ~ HeaderHash blk + , StandardHash l, HasLedgerTables l + ) + => Point blk + -> AnchorlessDbChangelog l + -> Maybe (l EmptyMK) +getPastLedgerAt pt db = current <$> rollback pt db + +-- | Roll back the volatile states up to the specified point. +rollbackToPoint :: + ( StandardHash l + , GetTip l + , HasLedgerTables l + ) + => Point l -> AnchorlessDbChangelog l -> Maybe (AnchorlessDbChangelog l) +rollbackToPoint pt dblog = do + vol' <- + AS.rollback + (pointSlot pt) + ((== pt) . getTip . 
either id id) + adcStates + let ndropped = AS.length adcStates - AS.length vol' + diffs' = ltmap (trunc ndropped) adcDiffs + Exn.assert (ndropped >= 0) $ pure AnchorlessDbChangelog { + adcLastFlushedSlot + , adcDiffs = diffs' + , adcStates = vol' + } + where + AnchorlessDbChangelog { + adcLastFlushedSlot + , adcDiffs + , adcStates + } = dblog + +-- | Rollback the volatile states up to the volatile anchor. +rollbackToAnchor :: + (GetTip l, HasLedgerTables l) + => AnchorlessDbChangelog l -> AnchorlessDbChangelog l +rollbackToAnchor dblog = + AnchorlessDbChangelog { + adcLastFlushedSlot + , adcDiffs = diffs' + , adcStates = AS.Empty (AS.anchor vol) + } + where + AnchorlessDbChangelog { + adcLastFlushedSlot + , adcDiffs + , adcStates = vol + } = dblog + + ndropped = AS.length vol + diffs' = + ltmap (trunc ndropped) adcDiffs + +trunc :: + (Ord k, Eq v) + => Int -> SeqDiffMK k v -> SeqDiffMK k v +trunc n (SeqDiffMK sq) = + SeqDiffMK $ fst $ DS.splitAtFromEnd n sq + +-- | Get a prefix of the DbChangelog that ends at the given point +-- +-- \( O(\log(\min(i,n-i)) \) +-- +-- When no ledger state (or anchor) has the given 'Point', 'Nothing' is +-- returned. +rollback :: + ( HasHeader blk, IsLedger l, HeaderHash l ~ HeaderHash blk + , StandardHash l, HasLedgerTables l + ) + => Point blk + -> AnchorlessDbChangelog l + -> Maybe (AnchorlessDbChangelog l) +rollback pt db + | pt == castPoint (getTip (anchor db)) + = Just $ rollbackToAnchor db + | otherwise + = rollbackToPoint (castPoint pt) db + +immutableTipSlot :: + GetTip l + => AnchorlessDbChangelog l -> WithOrigin SlotNo +immutableTipSlot = + getTipSlot + . AS.anchor + . adcStates + +-- | How many diffs we can flush to the backing store? +-- +-- NOTE: This will be wrong once we have more than one table. +flushableLength :: (HasLedgerTables l, GetTip l) + => AnchorlessDbChangelog l + -> Word64 +flushableLength chlog = + (\(Sum x) -> x - fromIntegral (AS.length (adcStates chlog))) + . ltcollapse + . ltmap (K2 . f) + $ adcDiffs chlog + where + f :: (Ord k, Eq v) + => SeqDiffMK k v + -> Sum Word64 + f (SeqDiffMK sq) = Sum $ fromIntegral $ DS.length sq + +-- | Transform the underlying volatile 'AnchoredSeq' using the given functions. +volatileStatesBimap :: + AS.Anchorable (WithOrigin SlotNo) a b + => (l EmptyMK -> a) + -> (l EmptyMK -> b) + -> DbChangelog l + -> AS.AnchoredSeq (WithOrigin SlotNo) a b +volatileStatesBimap f g = + AS.bimap f g + . adcStates + . anchorlessChangelog + +{------------------------------------------------------------------------------- + Testing +-------------------------------------------------------------------------------} +reapplyThenPush' :: ApplyBlock l blk + => LedgerDbCfg l + -> blk + -> KeySetsReader Identity l + -> AnchorlessDbChangelog l + -> AnchorlessDbChangelog l +reapplyThenPush' cfg b bk = runIdentity . reapplyThenPush cfg b bk + +reapplyThenPushMany' :: ApplyBlock l blk + => LedgerDbCfg l + -> [blk] + -> KeySetsReader Identity l + -> AnchorlessDbChangelog l + -> AnchorlessDbChangelog l +reapplyThenPushMany' cfg bs bk = + runIdentity . 
reapplyThenPushMany cfg bs bk
+
+reapplyThenPushMany ::
+ (ApplyBlock l blk, Monad m)
+ => LedgerDbCfg l
+ -> [blk]
+ -> KeySetsReader m l
+ -> AnchorlessDbChangelog l
+ -> m (AnchorlessDbChangelog l)
+reapplyThenPushMany cfg aps ksReader =
+ repeatedlyM (\ap -> reapplyThenPush cfg ap ksReader) aps
+
+switch ::
+ (ApplyBlock l blk, Monad m)
+ => LedgerDbCfg l
+ -> Word64
+ -> [blk]
+ -> KeySetsReader m l
+ -> AnchorlessDbChangelog l
+ -> m (Either ExceededRollback (AnchorlessDbChangelog l))
+switch cfg numRollbacks newBlocks ksReader db =
+ case rollbackN numRollbacks db of
+ Nothing ->
+ return $ Left $ ExceededRollback {
+ rollbackMaximum = maxRollback db
+ , rollbackRequested = numRollbacks
+ }
+ Just db' -> case newBlocks of
+ [] -> pure $ Right db'
+ -- no blocks to apply to ledger state, return current DbChangelog
+ _ -> Right <$> reapplyThenPushMany
+ cfg
+ newBlocks
+ ksReader
+ db'
+
+switch' :: ApplyBlock l blk
+ => LedgerDbCfg l
+ -> Word64
+ -> [blk]
+ -> KeySetsReader Identity l
+ -> AnchorlessDbChangelog l
+ -> Maybe (AnchorlessDbChangelog l)
+switch' cfg n bs bk db =
+ case runIdentity $ switch cfg n bs bk db of
+ Left ExceededRollback{} -> Nothing
+ Right db' -> Just db'
diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Flush.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Flush.hs
new file mode 100644
index 0000000000..60fa55f81c
--- /dev/null
+++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Flush.hs
@@ -0,0 +1,37 @@
+module Ouroboros.Consensus.Storage.LedgerDB.V1.Flush (
+ flushIntoBackingStore
+ , flushLedgerDB
+ ) where
+
+import Ouroboros.Consensus.Ledger.Abstract
+import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore
+import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog
+import Ouroboros.Consensus.Storage.LedgerDB.V1.Lock
+import Ouroboros.Consensus.Util.IOLike
+
+flushLedgerDB :: (MonadSTM m, GetTip l, HasLedgerTables l)
+ => StrictTVar m (DbChangelog l)
+ -> LedgerBackingStore m l
+ -> WriteLocked m ()
+flushLedgerDB chlogVar bstore = do
+ diffs <- writeLocked $ atomically $ do
+ ldb' <- readTVar chlogVar
+ let (toFlush, toKeep) = splitForFlushing ldb'
+ case toFlush of
+ Nothing -> pure ()
+ Just {} -> writeTVar chlogVar toKeep
+ pure toFlush
+ mapM_ (flushIntoBackingStore bstore) diffs
+
+-- | Flush **all the changes in this DbChangelog** into the backing store
+--
+-- Note that 'splitForFlushing' must have been called to split the 'DbChangelog'
+-- at the immutable tip and produce two 'DbChangelog's, one to flush and one to
+-- keep.
+--
+-- The write lock must be held before calling this function.
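+--
+-- As an illustrative sketch only (see 'flushLedgerDB' above for the actual
+-- usage in this module; @changelog@ and @bstore@ are assumed bindings), the
+-- intended call pattern, under the write lock, is:
+--
+-- @
+-- let (mToFlush, toKeep) = splitForFlushing changelog
+-- mapM_ (flushIntoBackingStore bstore) mToFlush
+-- -- ... and carry on with toKeep as the new changelog
+-- @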
+flushIntoBackingStore :: LedgerBackingStore m l -> DiffsToFlush l -> WriteLocked m () +flushIntoBackingStore backingStore dblog = writeLocked $ + bsWrite + backingStore + (toFlushSlot dblog) + (toFlushDiffs dblog) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Forker.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Forker.hs new file mode 100644 index 0000000000..a7e1a4517e --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Forker.hs @@ -0,0 +1,480 @@ +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Forker ( + -- * Main API + closeAllForkers + , newForkerAtFromTip + , newForkerAtPoint + , newForkerAtWellKnownPoint + -- * Acquire consistent views + , acquireAtFromTip + , acquireAtPoint + , acquireAtWellKnownPoint + ) where + +import Control.ResourceRegistry +import Control.Tracer +import Data.Functor.Contravariant ((>$<)) +import qualified Data.Map.Strict as Map +import Data.Semigroup +import qualified Data.Set as Set +import Data.Word +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.SupportsProtocol +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Ledger.Tables.DiffSeq (numDeletes, + numInserts) +import qualified Ouroboros.Consensus.Ledger.Tables.DiffSeq as DS +import Ouroboros.Consensus.Storage.LedgerDB.API as API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API as BackingStore +import Ouroboros.Consensus.Storage.LedgerDB.V1.Common +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog +import Ouroboros.Consensus.Storage.LedgerDB.V1.Lock +import Ouroboros.Consensus.Util +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.Protocol.LocalStateQuery.Type + +{------------------------------------------------------------------------------- + Close +-------------------------------------------------------------------------------} + +newForkerAtWellKnownPoint :: + ( IOLike m + , IsLedger l + , HasLedgerTables l + , LedgerSupportsProtocol blk + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> Target (Point blk) + -> m (Forker m l blk) +newForkerAtWellKnownPoint h rr pt = getEnv h $ \ldbEnv@LedgerDBEnv{ldbLock = AllowThunk lock} -> do + withReadLock lock (acquireAtWellKnownPoint ldbEnv rr pt) >>= newForker h ldbEnv + +newForkerAtPoint :: + ( HeaderHash l ~ HeaderHash blk + , IOLike m + , IsLedger l + , StandardHash l + , HasLedgerTables l + , LedgerSupportsProtocol blk + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> Point blk + -> m (Either GetForkerError (Forker m l blk)) +newForkerAtPoint h rr pt = getEnv h $ \ldbEnv@LedgerDBEnv{ldbLock = AllowThunk lock} -> do + withReadLock lock (acquireAtPoint ldbEnv rr pt) >>= traverse (newForker h ldbEnv) + +newForkerAtFromTip :: + ( IOLike m + , IsLedger l + , HasLedgerTables l + , LedgerSupportsProtocol blk + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> 
Word64
+ -> m (Either ExceededRollback (Forker m l blk))
+newForkerAtFromTip h rr n = getEnv h $ \ldbEnv@LedgerDBEnv{ldbLock = AllowThunk lock} -> do
+ withReadLock lock (acquireAtFromTip ldbEnv rr n) >>= traverse (newForker h ldbEnv)
+
+-- | Close all open forkers.
+closeAllForkers ::
+ IOLike m
+ => LedgerDBEnv m l blk
+ -> m ()
+closeAllForkers ldbEnv = do
+ forkerEnvs <- atomically $ do
+ forkerEnvs <- Map.elems <$> readTVar forkersVar
+ writeTVar forkersVar Map.empty
+ return forkerEnvs
+ mapM_ closeForkerEnv forkerEnvs
+ where
+ forkersVar = ldbForkers ldbEnv
+
+closeForkerEnv :: ForkerEnv m l blk -> m ()
+closeForkerEnv ForkerEnv { foeBackingStoreValueHandle } = bsvhClose foeBackingStoreValueHandle
+
+{-------------------------------------------------------------------------------
+ Acquiring consistent views
+-------------------------------------------------------------------------------}
+
+type Resources m l =
+ (LedgerBackingStoreValueHandle m l, AnchorlessDbChangelog l)
+
+-- | Acquire both a value handle and a db changelog at the tip. Holds a read lock
+-- while doing so.
+acquireAtWellKnownPoint ::
+ (IOLike m, StandardHash blk, GetTip l, HasLedgerTables l)
+ => LedgerDBEnv m l blk
+ -> ResourceRegistry m
+ -> Target (Point blk)
+ -> ReadLocked m (Resources m l)
+acquireAtWellKnownPoint ldbEnv rr VolatileTip =
+ readLocked $ do
+ dblog <- anchorlessChangelog <$> readTVarIO (ldbChangelog ldbEnv)
+ (,dblog) <$> acquire ldbEnv rr dblog
+acquireAtWellKnownPoint ldbEnv rr ImmutableTip =
+ readLocked $ do
+ dblog <- anchorlessChangelog <$> readTVarIO (ldbChangelog ldbEnv)
+ (, rollbackToAnchor dblog)
+ <$> acquire ldbEnv rr dblog
+acquireAtWellKnownPoint _ _ (SpecificPoint pt) =
+ error $ "calling acquireAtWellKnownPoint for a not well-known point: " <> show pt
+
+-- | Acquire both a value handle and a db changelog at the requested point. Holds
+-- a read lock while doing so.
+acquireAtPoint ::
+ forall m l blk. (
+ HeaderHash l ~ HeaderHash blk
+ , IOLike m
+ , IsLedger l
+ , StandardHash l
+ , HasLedgerTables l
+ , LedgerSupportsProtocol blk
+ )
+ => LedgerDBEnv m l blk
+ -> ResourceRegistry m
+ -> Point blk
+ -> ReadLocked m (Either GetForkerError (Resources m l))
+acquireAtPoint ldbEnv rr pt =
+ readLocked $ do
+ dblog <- anchorlessChangelog <$> readTVarIO (ldbChangelog ldbEnv)
+ let immTip = getTip $ anchor dblog
+ case rollback pt dblog of
+ Nothing | pointSlot pt < pointSlot immTip -> pure $ Left PointTooOld
+ | otherwise -> pure $ Left PointNotOnChain
+ Just dblog' -> Right . (,dblog') <$> acquire ldbEnv rr dblog'
+
+-- | Acquire both a value handle and a db changelog at n blocks before the tip.
+-- Holds a read lock while doing so.
+acquireAtFromTip ::
+ forall m l blk. (
+ IOLike m
+ , IsLedger l
+ , HasLedgerTables l
+ )
+ => LedgerDBEnv m l blk
+ -> ResourceRegistry m
+ -> Word64
+ -> ReadLocked m (Either ExceededRollback (Resources m l))
+acquireAtFromTip ldbEnv rr n =
+ readLocked $ do
+ dblog <- anchorlessChangelog <$> readTVarIO (ldbChangelog ldbEnv)
+ case rollbackN n dblog of
+ Nothing ->
+ return $ Left $ ExceededRollback {
+ API.rollbackMaximum = maxRollback dblog
+ , API.rollbackRequested = n
+ }
+ Just dblog' ->
+ Right . 
(,dblog') <$> acquire ldbEnv rr dblog' + +acquire :: + IOLike m + => LedgerDBEnv m l blk + -> ResourceRegistry m + -> AnchorlessDbChangelog l + -> m (LedgerBackingStoreValueHandle m l) +acquire ldbEnv rr dblog = do + (_, vh) <- allocate rr (\_ -> bsValueHandle $ ldbBackingStore ldbEnv) bsvhClose + if bsvhAtSlot vh == adcLastFlushedSlot dblog + then pure vh + else bsvhClose vh >> + error ( "Critical error: Value handles are created at " + <> show (bsvhAtSlot vh) + <> " while the db changelog is at " + <> show (adcLastFlushedSlot dblog) + <> ". There is either a race condition or a logic bug" + ) + +{------------------------------------------------------------------------------- + Make forkers from consistent views +-------------------------------------------------------------------------------} + +newForker :: + ( IOLike m + , HasLedgerTables l + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , GetTip l + ) + => LedgerDBHandle m l blk + -> LedgerDBEnv m l blk + -> Resources m l + -> m (Forker m l blk) +newForker h ldbEnv (vh, dblog) = do + dblogVar <- newTVarIO dblog + forkerKey <- atomically $ stateTVar (ldbNextForkerKey ldbEnv) $ \r -> (r, r + 1) + let forkerEnv = ForkerEnv { + foeBackingStoreValueHandle = vh + , foeChangelog = dblogVar + , foeSwitchVar = ldbChangelog ldbEnv + , foeSecurityParam = ledgerDbCfgSecParam $ ldbCfg ldbEnv + , foeQueryBatchSize = ldbQueryBatchSize ldbEnv + , foeTracer = LedgerDBForkerEvent . TraceForkerEventWithKey forkerKey >$< ldbTracer ldbEnv + } + atomically $ modifyTVar (ldbForkers ldbEnv) $ Map.insert forkerKey forkerEnv + traceWith (foeTracer forkerEnv) ForkerOpen + pure $ mkForker h forkerKey + +mkForker :: + ( IOLike m + , HasHeader blk + , HasLedgerTables l + , GetTip l + ) + => LedgerDBHandle m l blk + -> ForkerKey + -> Forker m l blk +mkForker h forkerKey = Forker { + forkerClose = implForkerClose h forkerKey + , forkerReadTables = getForkerEnv1 h forkerKey implForkerReadTables + , forkerRangeReadTables = getForkerEnv1 h forkerKey implForkerRangeReadTables + , forkerGetLedgerState = getForkerEnvSTM h forkerKey implForkerGetLedgerState + , forkerReadStatistics = getForkerEnv h forkerKey implForkerReadStatistics + , forkerPush = getForkerEnv1 h forkerKey implForkerPush + , forkerCommit = getForkerEnvSTM h forkerKey implForkerCommit + } + +implForkerClose :: + IOLike m + => LedgerDBHandle m l blk + -> ForkerKey + -> m () +implForkerClose (LDBHandle varState) forkerKey = do + envMay <- atomically $ readTVar varState >>= \case + LedgerDBClosed -> pure Nothing + LedgerDBOpen ldbEnv -> do + stateTVar + (ldbForkers ldbEnv) + (Map.updateLookupWithKey (\_ _ -> Nothing) forkerKey) + whenJust envMay closeForkerEnv + +implForkerReadTables :: + (MonadSTM m, HasLedgerTables l) + => ForkerEnv m l blk + -> LedgerTables l KeysMK + -> m (LedgerTables l ValuesMK) +implForkerReadTables env ks = do + traceWith (foeTracer env) ForkerReadTablesStart + chlog <- readTVarIO (foeChangelog env) + let rew = rewindTableKeySets chlog ks + unfwd <- readKeySetsWith lvh rew + case forwardTableKeySets chlog unfwd of + Left _err -> error "impossible!" 
+ Right vs -> do + traceWith (foeTracer env) ForkerReadTablesEnd + pure vs + where + lvh = foeBackingStoreValueHandle env + +implForkerRangeReadTables :: + (MonadSTM m, HasLedgerTables l) + => ForkerEnv m l blk + -> RangeQueryPrevious l + -> m (LedgerTables l ValuesMK) +implForkerRangeReadTables env rq0 = do + traceWith (foeTracer env) ForkerRangeReadTablesStart + ldb <- readTVarIO $ foeChangelog env + let -- Get the differences without the keys that are greater or equal + -- than the maximum previously seen key. + diffs = + maybe + id + (ltliftA2 doDropLTE) + (BackingStore.rqPrev rq) + $ ltmap prj + $ adcDiffs ldb + -- (1) Ensure that we never delete everything read from disk (ie + -- if our result is non-empty then it contains something read + -- from disk). + -- + -- (2) Also, read one additional key, which we will not include in + -- the result but need in order to know which in-memory + -- insertions to include. + maxDeletes = ltcollapse $ ltmap (K2 . numDeletesDiffMK) diffs + nrequested = 1 + max (BackingStore.rqCount rq) (1 + maxDeletes) + + values <- BackingStore.bsvhRangeRead lvh (rq{BackingStore.rqCount = nrequested}) + traceWith (foeTracer env) ForkerRangeReadTablesEnd + pure $ ltliftA2 (doFixupReadResult nrequested) diffs values + where + lvh = foeBackingStoreValueHandle env + + rq = BackingStore.RangeQuery rq1 (fromIntegral $ defaultQueryBatchSize $ foeQueryBatchSize env) + + rq1 = case rq0 of + NoPreviousQuery -> Nothing + PreviousQueryWasFinal -> Just (LedgerTables $ KeysMK Set.empty) + PreviousQueryWasUpTo k -> Just (LedgerTables $ KeysMK $ Set.singleton k) + + prj :: + (Ord k, Eq v) + => SeqDiffMK k v + -> DiffMK k v + prj (SeqDiffMK sq) = DiffMK (Diff.fromAntiDiff $ DS.cumulativeDiff sq) + + -- Remove all diff elements that are <= to the greatest given key + doDropLTE :: + Ord k + => KeysMK k v + -> DiffMK k v + -> DiffMK k v + doDropLTE (KeysMK ks) (DiffMK ds) = + DiffMK + $ case Set.lookupMax ks of + Nothing -> ds + Just k -> Diff.filterOnlyKey (> k) ds + + -- NOTE: this is counting the deletions wrt disk. + numDeletesDiffMK :: DiffMK k v -> Int + numDeletesDiffMK (DiffMK d) = + getSum $ Diff.foldMapDelta (Sum . oneIfDel) d + where + oneIfDel x = case x of + Diff.Delete -> 1 + Diff.Insert _ -> 0 + + -- INVARIANT: nrequested > 0 + -- + -- (1) if we reached the end of the store, then simply yield the given diff + -- applied to the given values + -- (2) otherwise, the readset must be non-empty, since 'rqCount' is positive + -- (3) remove the greatest read key + -- (4) remove all diff elements that are >= the greatest read key + -- (5) apply the remaining diff + -- (6) (the greatest read key will be the first fetched if the yield of this + -- result is next passed as 'rqPrev') + -- + -- Note that if the in-memory changelog contains the greatest key, then + -- we'll return that in step (1) above, in which case the next passed + -- 'rqPrev' will contain it, which will cause 'doDropLTE' to result in an + -- empty diff, which will result in an entirely empty range query result, + -- which is the termination case. + doFixupReadResult :: + Ord k + => Int + -- ^ Number of requested keys from the backing store. + -> DiffMK k v + -- ^ Differences that will be applied to the values read from the backing + -- store. + -> ValuesMK k v + -- ^ Values read from the backing store. The number of values read should + -- be at most @nrequested@. 
+ -> ValuesMK k v + doFixupReadResult + nrequested + (DiffMK ds) + (ValuesMK vs) = + let includingAllKeys = + Diff.applyDiff vs ds + definitelyNoMoreToFetch = Map.size vs < nrequested + in + ValuesMK + $ case Map.maxViewWithKey vs of + Nothing -> + if definitelyNoMoreToFetch + then includingAllKeys + else error $ "Size of values " <> show (Map.size vs) <> ", nrequested " <> show nrequested + Just ((k, _v), vs') -> + if definitelyNoMoreToFetch then includingAllKeys else + Diff.applyDiff + vs' + (Diff.filterOnlyKey (< k) ds) + +implForkerGetLedgerState :: + (MonadSTM m, GetTip l) + => ForkerEnv m l blk + -> STM m (l EmptyMK) +implForkerGetLedgerState env = current <$> readTVar (foeChangelog env) + +-- | Obtain statistics for a combination of backing store value handle and +-- changelog. +implForkerReadStatistics :: + (MonadSTM m, HasLedgerTables l) + => ForkerEnv m l blk + -> m (Maybe API.Statistics) +implForkerReadStatistics env = do + traceWith (foeTracer env) ForkerReadStatistics + dblog <- readTVarIO (foeChangelog env) + + let seqNo = adcLastFlushedSlot dblog + BackingStore.Statistics{sequenceNumber = seqNo', numEntries = n} <- bsvhStat lbsvh + if seqNo /= seqNo' then + error $ show (seqNo, seqNo') + else do + let + diffs = adcDiffs dblog + + nInserts = getSum + $ ltcollapse + $ ltmap (K2 . numInserts . getSeqDiffMK) + diffs + nDeletes = getSum + $ ltcollapse + $ ltmap (K2 . numDeletes . getSeqDiffMK) + diffs + pure . Just $ API.Statistics { + ledgerTableSize = n + nInserts - nDeletes + } + where + lbsvh = foeBackingStoreValueHandle env + +implForkerPush :: + (MonadSTM m, GetTip l, HasLedgerTables l) + => ForkerEnv m l blk + -> l DiffMK + -> m () +implForkerPush env newState = do + traceWith (foeTracer env) ForkerPushStart + atomically $ do + chlog <- readTVar (foeChangelog env) + let chlog' = prune (foeSecurityParam env) + $ extend newState chlog + writeTVar (foeChangelog env) chlog' + traceWith (foeTracer env) ForkerPushEnd + +implForkerCommit :: + (MonadSTM m, GetTip l, HasLedgerTables l) + => ForkerEnv m l blk + -> STM m () +implForkerCommit env = do + dblog <- readTVar (foeChangelog env) + modifyTVar (foeSwitchVar env) (\pruned -> + let s = fromWithOrigin 0 + . pointSlot + . 
getTip + $ changelogLastFlushedState pruned + in DbChangelog { + changelogLastFlushedState = changelogLastFlushedState pruned + , anchorlessChangelog = AnchorlessDbChangelog { + adcLastFlushedSlot = adcLastFlushedSlot $ anchorlessChangelog pruned + , adcStates = adcStates dblog + , adcDiffs = + ltliftA2 (f s) (adcDiffs $ anchorlessChangelog pruned) (adcDiffs dblog) + } + }) + where + f :: (Ord k, Eq v) + => SlotNo + -> SeqDiffMK k v + -> SeqDiffMK k v + -> SeqDiffMK k v + f s (SeqDiffMK prunedSeq) (SeqDiffMK extendedSeq) = SeqDiffMK $ + if DS.minSlot prunedSeq == DS.minSlot extendedSeq + then extendedSeq + else snd $ DS.splitAtSlot s extendedSeq diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs new file mode 100644 index 0000000000..88fc8f4bea --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs @@ -0,0 +1,385 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} + +#if __GLASGOW_HASKELL__ <= 906 +{-# OPTIONS_GHC -Wno-redundant-constraints #-} +#endif + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Init (mkInitDb) where + +import Control.Monad +import Control.Monad.Base +import Control.ResourceRegistry +import Control.Tracer (nullTracer) +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable +#endif +import Data.Functor.Contravariant ((>$<)) +import qualified Data.Map.Strict as Map +import Data.Maybe (isJust) +import Data.Set (Set) +import qualified Data.Set as Set +import Data.Word +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.HardFork.Abstract +import Ouroboros.Consensus.HeaderStateHistory + (HeaderStateHistory (..), mkHeaderStateWithTimeFromSummary) +import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Init +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate as Validate +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args as V1 +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import Ouroboros.Consensus.Storage.LedgerDB.V1.Common +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog as DbCh + (empty, flushableLength) +import Ouroboros.Consensus.Storage.LedgerDB.V1.Flush +import Ouroboros.Consensus.Storage.LedgerDB.V1.Forker +import Ouroboros.Consensus.Storage.LedgerDB.V1.Lock +import Ouroboros.Consensus.Storage.LedgerDB.V1.Snapshots +import Ouroboros.Consensus.Util +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike +import 
qualified Ouroboros.Network.AnchoredSeq as AS
+import System.FS.API
+
+mkInitDb ::
+ forall m blk.
+ ( LedgerSupportsProtocol blk
+ , IOLike m
+ , LedgerDbSerialiseConstraints blk
+ , MonadBase m m
+ , HasHardForkHistory blk
+#if __GLASGOW_HASKELL__ < 910
+ , HasAnnTip blk
+#endif
+ )
+ => Complete LedgerDbArgs m blk
+ -> Complete V1.LedgerDbFlavorArgs m
+ -> Validate.ResolveBlock m blk
+ -> InitDB (DbChangelog' blk, BackingStore' m blk) m blk
+mkInitDb args bss getBlock =
+ InitDB {
+ initFromGenesis = do
+ st <- lgrGenesis
+ let chlog = DbCh.empty (forgetLedgerTables st)
+ (_, backingStore) <-
+ allocate
+ lgrRegistry
+ (\_ -> newBackingStore bsTracer baArgs lgrHasFS' (projectLedgerTables st))
+ bsClose
+ pure (chlog, backingStore)
+ , initFromSnapshot =
+ loadSnapshot bsTracer baArgs (configCodec . getExtLedgerCfg . ledgerDbCfg $ lgrConfig) lgrHasFS'
+ , closeDb = bsClose . snd
+ , initReapplyBlock = \cfg blk (chlog, bstore) -> do
+ !chlog' <- onChangelogM (reapplyThenPush cfg blk (readKeySets bstore)) chlog
+ -- It's OK to flush without a lock here, since the `LedgerDB` has not
+ -- finished initializing: only this thread has access to the backing
+ -- store.
+ chlog'' <- unsafeIgnoreWriteLock
+ $ if defaultShouldFlush flushFreq (flushableLength $ anchorlessChangelog chlog')
+ then do
+ let (toFlush, toKeep) = splitForFlushing chlog'
+ mapM_ (flushIntoBackingStore bstore) toFlush
+ pure toKeep
+ else pure chlog'
+ pure (chlog'', bstore)
+ , currentTip = ledgerState . current . anchorlessChangelog . fst
+ , mkLedgerDb = \(db, lgrBackingStore) -> do
+ let dbPrunedToImmDBTip = onChangelog pruneToImmTipOnly db
+ (varDB, prevApplied) <-
+ (,) <$> newTVarIO dbPrunedToImmDBTip <*> newTVarIO Set.empty
+ flushLock <- mkLedgerDBLock
+ forkers <- newTVarIO Map.empty
+ nextForkerKey <- newTVarIO (ForkerKey 0)
+ let env = LedgerDBEnv {
+ ldbChangelog = varDB
+ , ldbBackingStore = lgrBackingStore
+ , ldbLock = AllowThunk flushLock
+ , ldbPrevApplied = prevApplied
+ , ldbForkers = forkers
+ , ldbNextForkerKey = nextForkerKey
+ , ldbSnapshotPolicy = defaultSnapshotPolicy (ledgerDbCfgSecParam lgrConfig) lgrSnapshotPolicyArgs
+ , ldbTracer = lgrTracer
+ , ldbCfg = lgrConfig
+ , ldbHasFS = lgrHasFS'
+ , ldbShouldFlush = defaultShouldFlush flushFreq
+ , ldbQueryBatchSize = queryBatchSize
+ , ldbResolveBlock = getBlock
+ }
+ h <- LDBHandle <$> newTVarIO (LedgerDBOpen env)
+ pure $ implMkLedgerDb h
+ }
+ where
+ bsTracer = nullTracer --LedgerDBFlavorImplEvent . FlavorImplSpecificTraceV1 >$< lgrTracer
+
+ LedgerDbArgs {
+ lgrHasFS
+ , lgrTracer
+ , lgrSnapshotPolicyArgs
+ , lgrConfig
+ , lgrGenesis
+ , lgrRegistry
+ } = args
+
+ lgrHasFS' = SnapshotsFS lgrHasFS
+
+ V1Args flushFreq queryBatchSize baArgs = bss
+
+implMkLedgerDb ::
+ forall m l blk. 
+ ( IOLike m + , HasCallStack + , StandardHash l + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , MonadBase m m + , ApplyBlock l blk + , l ~ ExtLedgerState blk +#if __GLASGOW_HASKELL__ < 910 + , HasAnnTip blk +#endif + , HasHardForkHistory blk + ) + => LedgerDBHandle m l blk + -> (LedgerDB' m blk, TestInternals' m blk) +implMkLedgerDb h = (LedgerDB { + getVolatileTip = getEnvSTM h implGetVolatileTip + , getImmutableTip = getEnvSTM h implGetImmutableTip + , getPastLedgerState = getEnvSTM1 h implGetPastLedgerState + , getHeaderStateHistory = getEnvSTM h implGetHeaderStateHistory + , getForkerAtWellKnownPoint = newForkerAtWellKnownPoint h + , getForkerAtPoint = newForkerAtPoint h + , validate = getEnv5 h (implValidate h) + , getPrevApplied = getEnvSTM h implGetPrevApplied + , garbageCollect = getEnvSTM1 h implGarbageCollect + , tryTakeSnapshot = getEnv2 h implTryTakeSnapshot + , tryFlush = getEnv h implTryFlush + , closeDB = implCloseDB h + }, mkInternals h) + +implGetVolatileTip :: + (MonadSTM m, GetTip l) + => LedgerDBEnv m l blk + -> STM m (l EmptyMK) +implGetVolatileTip = fmap (current . anchorlessChangelog) . readTVar . ldbChangelog + +implGetImmutableTip :: + MonadSTM m + => LedgerDBEnv m l blk + -> STM m (l EmptyMK) +implGetImmutableTip = fmap (anchor . anchorlessChangelog) . readTVar . ldbChangelog + +implGetPastLedgerState :: + ( MonadSTM m , HasHeader blk, IsLedger l, StandardHash l + , HasLedgerTables l, HeaderHash l ~ HeaderHash blk ) + => LedgerDBEnv m l blk -> Point blk -> STM m (Maybe (l EmptyMK)) +implGetPastLedgerState env point = getPastLedgerAt point . anchorlessChangelog <$> readTVar (ldbChangelog env) + +implGetHeaderStateHistory :: + ( MonadSTM m + , l ~ ExtLedgerState blk + , IsLedger (LedgerState blk) + , HasHardForkHistory blk + , HasAnnTip blk + ) + => LedgerDBEnv m l blk -> STM m (HeaderStateHistory blk) +implGetHeaderStateHistory env = do + ldb <- anchorlessChangelog <$> readTVar (ldbChangelog env) + let currentLedgerState = ledgerState $ current ldb + -- This summary can convert all tip slots of the ledger states in the + -- @ledgerDb@ as these are not newer than the tip slot of the current + -- ledger state (Property 17.1 in the Consensus report). + summary = hardForkSummary (configLedger $ getExtLedgerCfg $ ledgerDbCfg $ ldbCfg env) currentLedgerState + mkHeaderStateWithTime' = + mkHeaderStateWithTimeFromSummary summary + . headerState + pure + . HeaderStateHistory + . AS.bimap mkHeaderStateWithTime' mkHeaderStateWithTime' + $ adcStates ldb + +implValidate :: + forall m l blk. ( + IOLike m + , LedgerSupportsProtocol blk + , HasCallStack + , l ~ ExtLedgerState blk + , MonadBase m m + ) + => LedgerDBHandle m l blk + -> LedgerDBEnv m l blk + -> ResourceRegistry m + -> (TraceValidateEvent blk -> m ()) + -> BlockCache blk + -> Word64 + -> [Header blk] + -> m (ValidateResult m (ExtLedgerState blk) blk) +implValidate h ldbEnv = + Validate.validate + (ldbResolveBlock ldbEnv) + (getExtLedgerCfg . ledgerDbCfg $ ldbCfg ldbEnv) + (\l -> do + prev <- readTVar (ldbPrevApplied ldbEnv) + writeTVar (ldbPrevApplied ldbEnv) (foldl' (flip Set.insert) prev l)) + (readTVar (ldbPrevApplied ldbEnv)) + (newForkerAtFromTip h) + + +implGetPrevApplied :: MonadSTM m => LedgerDBEnv m l blk -> STM m (Set (RealPoint blk)) +implGetPrevApplied env = readTVar (ldbPrevApplied env) + +-- | Remove all points with a slot older than the given slot from the set of +-- previously applied points. 
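--
-- For illustration (a sketch, assuming the 'Ord' instance of 'RealPoint'
-- orders points by slot first, which makes the predicate below antitone as
-- 'Set.dropWhileAntitone' requires):
--
-- >>> Set.dropWhileAntitone (< 3) (Set.fromList [1 .. 5 :: Int])
-- fromList [3,4,5]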
+implGarbageCollect :: MonadSTM m => LedgerDBEnv m l blk -> SlotNo -> STM m () +implGarbageCollect env slotNo = modifyTVar (ldbPrevApplied env) $ + Set.dropWhileAntitone ((< slotNo) . realPointSlot) + +implTryTakeSnapshot :: + ( l ~ ExtLedgerState blk + , IOLike m, LedgerDbSerialiseConstraints blk, LedgerSupportsProtocol blk + ) + => LedgerDBEnv m l blk -> Maybe (Time, Time) -> Word64 -> m SnapCounters +implTryTakeSnapshot env@LedgerDBEnv{ldbLock = AllowThunk lock} mTime nrBlocks = + if onDiskShouldTakeSnapshot (ldbSnapshotPolicy env) (uncurry (flip diffTime) <$> mTime) nrBlocks then do + void $ withReadLock lock (takeSnapshot + (ldbChangelog env) + (configCodec . getExtLedgerCfg . ledgerDbCfg $ ldbCfg env) + (LedgerDBSnapshotEvent >$< ldbTracer env) + (ldbHasFS env) + (ldbBackingStore env) + Nothing) + void $ trimSnapshots + (LedgerDBSnapshotEvent >$< ldbTracer env) + (snapshotsFs $ ldbHasFS env) + (ldbSnapshotPolicy env) + (`SnapCounters` 0) . Just <$> maybe getMonotonicTime (pure . snd) mTime + else + pure $ SnapCounters (fst <$> mTime) nrBlocks + +-- If the DbChangelog in the LedgerDB can flush (based on the SnapshotPolicy +-- with which this LedgerDB was opened), flush differences to the backing +-- store. Note this acquires a write lock on the backing store. +implTryFlush :: + (IOLike m, HasLedgerTables l, GetTip l) + => LedgerDBEnv m l blk -> m () +implTryFlush env@LedgerDBEnv{ldbLock = AllowThunk lock} = do + ldb <- readTVarIO $ ldbChangelog env + when (ldbShouldFlush env $ DbCh.flushableLength $ anchorlessChangelog ldb) + (withWriteLock + lock + (flushLedgerDB (ldbChangelog env) (ldbBackingStore env)) + ) + +implCloseDB :: IOLike m => LedgerDBHandle m l blk -> m () +implCloseDB (LDBHandle varState) = do + mbOpenEnv <- atomically $ readTVar varState >>= \case + -- Idempotent + LedgerDBClosed -> return Nothing + LedgerDBOpen env -> do + writeTVar varState LedgerDBClosed + return $ Just env + + -- Only when the LedgerDB was open + whenJust mbOpenEnv $ \env -> do + closeAllForkers env + bsClose (ldbBackingStore env) + +mkInternals :: + ( IOLike m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , ApplyBlock (ExtLedgerState blk) blk + , MonadBase m m + + ) + => LedgerDBHandle m (ExtLedgerState blk) blk + -> TestInternals' m blk +mkInternals h = TestInternals { + takeSnapshotNOW = getEnv1 h implIntTakeSnapshot + , reapplyThenPushNOW = getEnv1 h implIntReapplyThenPushBlock + , wipeLedgerDB = getEnv h $ void . destroySnapshots . ldbHasFS + , closeLedgerDB = getEnv h $ bsClose . ldbBackingStore + , truncateSnapshots = getEnv h $ void . implIntTruncateSnapshots . ldbHasFS + } + +-- | Testing only! Destroy all snapshots in the DB. +destroySnapshots :: Monad m => SnapshotsFS m -> m () +destroySnapshots (SnapshotsFS (SomeHasFS fs)) = do + dirs <- Set.lookupMax . Set.filter (isJust . snapshotFromPath) <$> listDirectory fs (mkFsPath []) + mapM_ ((\d -> do + isDir <- doesDirectoryExist fs d + if isDir + then removeDirectoryRecursive fs d + else removeFile fs d + ) . mkFsPath . (:[])) dirs + +-- | Testing only! Truncate all snapshots in the DB. +implIntTruncateSnapshots :: MonadThrow m => SnapshotsFS m -> m () +implIntTruncateSnapshots (SnapshotsFS (SomeHasFS fs)) = do + dirs <- Set.lookupMax . Set.filter (isJust . snapshotFromPath) <$> listDirectory fs (mkFsPath []) + mapM_ (truncateRecursively . 
(:[])) dirs + where + truncateRecursively pre = do + dirs <- listDirectory fs (mkFsPath pre) + mapM_ (\d -> do + let d' = pre ++ [d] + isDir <- doesDirectoryExist fs $ mkFsPath d' + if isDir + then truncateRecursively d' + else withFile fs (mkFsPath d') (AppendMode AllowExisting) $ \h -> hTruncate fs h 0 + ) dirs + +implIntTakeSnapshot :: + ( IOLike m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , l ~ ExtLedgerState blk + ) + => LedgerDBEnv m l blk -> Maybe DiskSnapshot -> m () +implIntTakeSnapshot env@LedgerDBEnv{ldbLock = AllowThunk lock} diskSnapshot = do + withWriteLock + lock + (flushLedgerDB (ldbChangelog env) (ldbBackingStore env)) + void $ withReadLock lock $ + takeSnapshot + (ldbChangelog env) + (configCodec . getExtLedgerCfg . ledgerDbCfg $ ldbCfg env) + (LedgerDBSnapshotEvent >$< ldbTracer env) + (ldbHasFS env) + (ldbBackingStore env) + diskSnapshot + +implIntReapplyThenPushBlock :: + ( IOLike m + , ApplyBlock l blk + , MonadBase m m + , l ~ ExtLedgerState blk + ) + => LedgerDBEnv m l blk -> blk -> m () +implIntReapplyThenPushBlock env blk = do + chlog <- readTVarIO $ ldbChangelog env + chlog' <- onChangelogM (reapplyThenPush (ldbCfg env) blk (readKeySets (ldbBackingStore env))) chlog + atomically $ writeTVar (ldbChangelog env) chlog' diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Lock.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Lock.hs new file mode 100644 index 0000000000..48abead325 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Lock.hs @@ -0,0 +1,86 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE MonoLocalBinds #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE UndecidableInstances #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Lock ( + -- * LedgerDB lock + LedgerDBLock + , ReadLocked + , WriteLocked + , mkLedgerDBLock + , readLocked + , unsafeIgnoreWriteLock + , withReadLock + , withWriteLock + , writeLocked + ) where + +import qualified Control.RAWLock as Lock +import NoThunks.Class +import Ouroboros.Consensus.Util.IOLike + +{------------------------------------------------------------------------------- + LedgerDB lock +-------------------------------------------------------------------------------} + +-- | A lock to prevent the LedgerDB (i.e. a 'DbChangelog') from getting out of +-- sync with the 'BackingStore'. +-- +-- We rely on the capability of the @BackingStore@s of providing +-- 'BackingStoreValueHandles' that can be used to hold a persistent view of the +-- database as long as the handle is open. Assuming this functionality, the lock +-- is used in three ways: +-- +-- - Read lock to acquire a value handle: we do this when acquiring a view of the +-- 'LedgerDB' (which lives in a 'StrictTVar' at the 'ChainDB' level) and of +-- the 'BackingStore'. We momentarily acquire a read lock, consult the +-- transactional variable and also open a 'BackingStoreValueHandle'. This is +-- the case for ledger state queries and for the forging loop. +-- +-- - Read lock to ensure two operations are in sync: in the above situation, we +-- relied on the 'BackingStoreValueHandle' functionality, but sometimes we +-- won't access the values through a value handle, and instead we might use +-- the LMDB environment (as it is the case for 'lmdbCopy'). 
In these cases, we + -- acquire a read lock until the copy has finished, so that writers are blocked + -- until this process is completed. This is the case when taking a snapshot. + -- + -- - Write lock when flushing differences. +newtype LedgerDBLock m = LedgerDBLock (Lock.RAWLock m ()) + +deriving newtype instance NoThunks (Lock.RAWLock m ()) => NoThunks (LedgerDBLock m) + +mkLedgerDBLock :: IOLike m => m (LedgerDBLock m) +mkLedgerDBLock = LedgerDBLock <$> Lock.new () + +-- | An action in @m@ that has to hold the read lock. See @withReadLock@. +newtype ReadLocked m a = ReadLocked { runReadLocked :: m a } + deriving newtype (Functor, Applicative, Monad) + +-- | Enforce that the action has to be run while holding the read lock. +readLocked :: m a -> ReadLocked m a +readLocked = ReadLocked + +-- | Acquire the ledger DB read lock and hold it while performing an action. +withReadLock :: IOLike m => LedgerDBLock m -> ReadLocked m a -> m a +withReadLock (LedgerDBLock lock) m = + Lock.withReadAccess lock (\() -> runReadLocked m) + +-- | An action in @m@ that has to hold the write lock. See @withWriteLock@. +newtype WriteLocked m a = WriteLocked { runWriteLocked :: m a } + deriving newtype (Functor, Applicative, Monad) + +unsafeIgnoreWriteLock :: WriteLocked m a -> m a +unsafeIgnoreWriteLock = runWriteLocked + +-- | Enforce that the action has to be run while holding the write lock. +writeLocked :: m a -> WriteLocked m a +writeLocked = WriteLocked + +-- | Acquire the ledger DB write lock and hold it while performing an action. +withWriteLock :: IOLike m => LedgerDBLock m -> WriteLocked m a -> m a +withWriteLock (LedgerDBLock lock) m = + Lock.withWriteAccess lock (\() -> (,()) <$> runWriteLocked m) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Snapshots.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Snapshots.hs new file mode 100644 index 0000000000..b555e99776 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Snapshots.hs @@ -0,0 +1,255 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} + +{- | Snapshots + + Snapshotting a ledger state means saving a copy of the in-memory part of the + ledger state serialized as a file on disk, as well as flushing the differences on + the ledger tables between the last snapshotted ledger state and the one that + we are snapshotting now, and then making a copy of the resulting on-disk state. + + == Startup + + During initialisation, the goal is to construct an initial 'LedgerDB' where + the sequence of in-memory states is empty except for the ledger state at the + anchor of the 'DbChangelog', which has to correspond to the immutable tip, + i.e., the block at the tip of the Immutable DB. + + Ideally, we can construct the initial 'LedgerDB' from a snapshot of the ledger + state that we wrote to disk. Remember that updating a ledger state with a + block is not invertible: we can apply a block to a ledger state, but we cannot + /unapply/ a block to a ledger state. This means the snapshot has to be at + least as old as the anchor. A snapshot matching the anchor can be used as is. + A snapshot older than the anchor can be used after reapplying the necessary + blocks. A snapshot newer than the anchor can /not/ be used, as we cannot + unapply blocks to get the ledger state corresponding to the anchor. This is + the reason why we only take snapshots of an immutable ledger state, i.e., of + the anchor of the 'DbChangelog' (or older). 
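  For example, if the immutable tip (and thus the 'DbChangelog' anchor) sits
  at slot 100, a snapshot taken at slot 90 can be used after reapplying the
  immutable blocks between slots 91 and 100, whereas a snapshot taken at slot
  110 would have to be discarded, because the blocks past the anchor cannot be
  unapplied.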
+ + On startup, the node will: + + 1. Find the latest snapshot, which will be a directory inside the @ledger@ directory of the node's database, named after the slot number of the ledger state that + was snapshotted: + + > <db directory> + > ├── volatile + > ├── immutable + > └── ledger + >     ├── <slot number> + >     │   ├── tables + >     │   └── state + >     ├── ... + >     └── <slot number> + >        ├── tables + >        └── state + + The @state@ file is a serialization of the in-memory part of the ledger + state with empty tables (i.e. an @ExtLedgerState blk EmptyMK@), and + @tables@ will store a persistent copy of the 'LedgerTable's. Depending on + the 'BackingStore' implementation in use, this might be a file or a + directory. + + 2. Depending on the snapshots found, there are two possibilities: + + - If there is no snapshot to load, create a new @'BackingStore'@ with the + contents of the Genesis ledger tables and finish. + + - If there is a snapshot found, then deserialize (with @DecodeDisk@) the + @state@ file. If deserialization fails, delete this snapshot and start + again. If the snapshot is newer than the immutable tip, delete this + snapshot and start again. + + If we did find a snapshot, we will overwrite (either by literally + overwriting it or by using some feature of the specific backend used) the + @BackingStore@ tables with the contents of @tables@ from said snapshot, + as they were left when the node shut down. + + 3. The deserialized ledger state and tables will then be used as the initial + ledger state for the ledger database. + + 4. Reapply the immutable blocks after the snapshot to obtain the ledger state + at the immutable tip. The blocks to reapply are streamed from the Immutable + DB, using an iterator. + + Note that we can /reapply/ these blocks, which is quicker than applying + them, as the existence of a snapshot newer than these blocks proves (unless + the on-disk database has been tampered with, but this is not an attack we + intend to protect against, as this would mean the machine has already been + compromised) that they have been successfully applied in the past. + + Reading and applying blocks is costly. Typically, very few blocks need to be + reapplied in practice. However, there is one exception: when the serialisation + format of the ledger state changes, all snapshots (written using the old + serialisation format) will fail to deserialise, and all blocks starting from + genesis will have to be reapplied. + + At this point, the node carries a @DbChangelog@ that is initialized and ready + to have the blocks from the volatile database applied on top of it. + + == Taking snapshots during normal operation + + Snapshots are taken by the @'copyAndSnapshotRunner'@ when the snapshot policy + dictates to do so. Whenever the chain grows past @k@ blocks, said runner will + copy the blocks which are more than @k@ blocks from the tip (i.e. the ones + that must be considered immutable) to the immutable database and then: + + 1. Every time we have processed a specific number of blocks since the last + flush (set by default to 100), perform a flush of differences in the + 'DbChangelog' up to the immutable db tip. + + 2. If dictated by the snapshot policy, immediately flush all the differences up to + the immutable db tip and serialize (using 'EncodeDisk') the DbChangelog + in-memory ledger states anchor (@ExtLedgerState blk EmptyMK@). + + A directory is created named after the slot number of the ledger state + being snapshotted, and the serialization from above is written into the + @<slot number>/state@ file and the @BackingStore@ tables are copied into + the @<slot number>/tables@ file. + + 3. 
There is a maximum number of snapshots that should exist on disk at any + time, dictated by the @SnapshotPolicy@, so, if needed, we will trim out old + snapshots. + + == Flush during startup and snapshot at the end of startup + + Because the database has to carry around all the differences + between the last snapshotted state and the current tip, there is a need to + flush when replaying the chain, as otherwise, for example on a replay from + genesis to the tip, we would carry millions of differences in memory. + + Because of this, when we are replaying blocks we will flush regularly. As the + last snapshot that was taken lives in a @<slot number>/tables@ file, there is + no risk of destroying it (i.e. overwriting the tables of an earlier snapshot) by + flushing. Only when we finish replaying blocks and start the background + threads (specifically the @copyAndSnapshotRunner@) do we take a + snapshot of the current immutable database anchor as described above. + +-------------------------------------------------------------------------------} + +module Ouroboros.Consensus.Storage.LedgerDB.V1.Snapshots ( + loadSnapshot + , takeSnapshot + -- * Testing + , snapshotToStatePath + , snapshotToTablesPath + ) where + +import Codec.CBOR.Encoding +import Codec.Serialise +import Control.Monad.Except +import Control.Tracer +import qualified Data.List as List +import Data.Maybe (fromMaybe) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as V1 +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog +import Ouroboros.Consensus.Storage.LedgerDB.V1.Lock +import Ouroboros.Consensus.Util.Args (Complete) +import Ouroboros.Consensus.Util.IOLike +import System.FS.API + +-- | Try to take a snapshot of the /oldest ledger state/ in the ledger DB. +-- +-- We write the /oldest/ ledger state to disk because the intention is to only +-- write ledger states to disk that we know to be immutable. Primarily for +-- testing purposes, 'takeSnapshot' returns the block reference corresponding +-- to the snapshot that we wrote. +-- +-- If a snapshot with the same number already exists on disk or if the tip is at +-- genesis, no snapshot is taken. +-- +-- Note that an EBB can have the same slot number and thus snapshot number as +-- the block after it. This doesn't matter. The one block difference in the +-- ledger state doesn't warrant an additional snapshot. The number in the name +-- of the snapshot is only indicative, we don't rely on it being correct. +-- +-- NOTE: This is a lower-level API that takes a snapshot independently of +-- whether this snapshot corresponds to a state that is more than @k@ back. +-- +-- TODO: Should we delete the file if an error occurs during writing? 
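--
-- A typical call, mirroring the one made from 'implTryTakeSnapshot' (a sketch;
-- @env@ is assumed to be a 'LedgerDBEnv' and @lock@ its 'LedgerDBLock'):
--
-- > withReadLock lock $
-- >   takeSnapshot
-- >     (ldbChangelog env)
-- >     (configCodec . getExtLedgerCfg . ledgerDbCfg $ ldbCfg env)
-- >     (LedgerDBSnapshotEvent >$< ldbTracer env)
-- >     (ldbHasFS env)
-- >     (ldbBackingStore env)
-- >     Nothing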
+takeSnapshot :: + ( IOLike m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + ) + => StrictTVar m (DbChangelog' blk) + -> CodecConfig blk + -> Tracer m (TraceSnapshotEvent blk) + -> SnapshotsFS m + -> BackingStore' m blk + -> Maybe DiskSnapshot -- ^ Override for snapshot numbering + -> ReadLocked m (Maybe (DiskSnapshot, RealPoint blk)) +takeSnapshot ldbvar ccfg tracer (SnapshotsFS hasFS') backingStore dsOverride = readLocked $ do + state <- changelogLastFlushedState <$> readTVarIO ldbvar + case pointToWithOriginRealPoint (castPoint (getTip state)) of + Origin -> + return Nothing + NotOrigin t -> do + let number = unSlotNo (realPointSlot t) + snapshot = fromMaybe (DiskSnapshot number Nothing) dsOverride + diskSnapshots <- listSnapshots hasFS' + if List.any ((== number) . dsNumber) diskSnapshots then + return Nothing + else do + writeSnapshot hasFS' backingStore (encodeDiskExtLedgerState ccfg) snapshot state + traceWith tracer $ TookSnapshot snapshot t + return $ Just (snapshot, t) + +-- | Write snapshot to disk +writeSnapshot :: + MonadThrow m + => SomeHasFS m + -> BackingStore' m blk + -> (ExtLedgerState blk EmptyMK -> Encoding) + -> DiskSnapshot + -> ExtLedgerState blk EmptyMK + -> m () +writeSnapshot fs@(SomeHasFS hasFS) backingStore encLedger snapshot cs = do + createDirectory hasFS (snapshotToDirPath snapshot) + writeExtLedgerState fs encLedger (snapshotToStatePath snapshot) cs + bsCopy + backingStore + (snapshotToTablesPath snapshot) + +-- | The path within the LedgerDB's filesystem to the file that contains the +-- snapshot's serialized ledger state +snapshotToStatePath :: DiskSnapshot -> FsPath +snapshotToStatePath = mkFsPath . (\x -> [x, "state"]) . snapshotToDirName + +-- | The path within the LedgerDB's filesystem to the directory that contains a +-- snapshot's backing store +snapshotToTablesPath :: DiskSnapshot -> FsPath +snapshotToTablesPath = mkFsPath . (\x -> [x, "tables"]) . 
snapshotToDirName + +loadSnapshot :: + ( IOLike m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + ) + => Tracer m V1.FlavorImplSpecificTrace + -> Complete BackingStoreArgs m + -> CodecConfig blk + -> SnapshotsFS m + -> DiskSnapshot + -> m (Either + (SnapshotFailure blk) + ((DbChangelog' blk, LedgerBackingStore m (ExtLedgerState blk)), RealPoint blk)) +loadSnapshot tracer bss ccfg fs@(SnapshotsFS fs') s = do + eExtLedgerSt <- runExceptT $ readExtLedgerState fs' (decodeDiskExtLedgerState ccfg) decode (snapshotToStatePath s) + case eExtLedgerSt of + Left err -> pure (Left $ InitFailureRead err) + Right extLedgerSt -> do + case pointToWithOriginRealPoint (castPoint (getTip extLedgerSt)) of + Origin -> pure (Left InitFailureGenesis) + NotOrigin pt -> do + backingStore <- restoreBackingStore tracer bss fs (snapshotToTablesPath s) + let chlog = empty extLedgerSt + pure (Right ((chlog, backingStore), pt)) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Args.hs new file mode 100644 index 0000000000..f08d5789d7 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Args.hs @@ -0,0 +1,32 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE TypeFamilies #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V2.Args ( + FlavorImplSpecificTrace (..) + , HandleArgs (..) + , LedgerDbFlavorArgs (..) + ) where + +import GHC.Generics +import NoThunks.Class + +data LedgerDbFlavorArgs f m = V2Args HandleArgs + +data HandleArgs = + InMemoryHandleArgs + -- TODO + -- | LSMHandleArgs + deriving (Generic, NoThunks) + +data FlavorImplSpecificTrace = + FlavorImplSpecificTraceInMemory + | FlavorImplSpecificTraceOnDisk + deriving (Show, Eq) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Common.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Common.hs new file mode 100644 index 0000000000..bbab3d63f7 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Common.hs @@ -0,0 +1,534 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V2.Common ( + -- * LedgerDBEnv + LDBLock (..) + , LedgerDBEnv (..) + , LedgerDBHandle (..) + , LedgerDBState (..) 
+ , closeAllForkers + , getEnv + , getEnv2 + , getEnv5 + , getEnvSTM + , getEnvSTM1 + -- * Forkers + , newForkerAtFromTip + , newForkerAtPoint + , newForkerAtWellKnownPoint + ) where + +import Control.Arrow +import Control.Monad ((>=>)) +import Control.RAWLock (RAWLock) +import qualified Control.RAWLock as RAWLock +import Control.ResourceRegistry +import Control.Tracer +import Data.Functor.Contravariant ((>$<)) +import Data.Kind +import Data.Map (Map) +import qualified Data.Map.Strict as Map +import Data.Maybe (fromMaybe) +import Data.Set (Set) +import Data.Word +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate +import Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq +import Ouroboros.Consensus.Util +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Util.NormalForm.StrictTVar () +import qualified Ouroboros.Network.AnchoredSeq as AS +import Ouroboros.Network.Protocol.LocalStateQuery.Type +import Prelude hiding (read) +import System.FS.API + +{------------------------------------------------------------------------------- + The LedgerDBEnv +-------------------------------------------------------------------------------} + +data LDBLock = LDBLock deriving (Generic, NoThunks) + +type LedgerDBEnv :: (Type -> Type) -> LedgerStateKind -> Type -> Type +data LedgerDBEnv m l blk = LedgerDBEnv { + -- | INVARIANT: the tip of the 'LedgerDB' is always in sync with the tip of + -- the current chain of the ChainDB. + ldbSeq :: !(StrictTVar m (LedgerSeq m l)) + -- | INVARIANT: this set contains only points that are in the + -- VolatileDB. + -- + -- INVARIANT: all points on the current chain fragment are in this set. + -- + -- The VolatileDB might contain invalid blocks, these will not be in + -- this set. + -- + -- When a garbage-collection is performed on the VolatileDB, the points + -- of the blocks eligible for garbage-collection should be removed from + -- this set. + , ldbPrevApplied :: !(StrictTVar m (Set (RealPoint blk))) + -- | Open forkers. + -- + -- INVARIANT: a forker is open iff its 'ForkerKey' is in this 'Map. 
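-- The invariant is maintained by 'newForker', which registers the new
-- 'ForkerEnv' under a fresh 'ForkerKey', and by 'implForkerClose' and
-- 'closeAllForkers', which remove entries (releasing the forker's resources)
-- when a forker is closed.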
+ , ldbForkers :: !(StrictTVar m (Map ForkerKey (ForkerEnv m l blk))) + , ldbNextForkerKey :: !(StrictTVar m ForkerKey) + + , ldbSnapshotPolicy :: !SnapshotPolicy + , ldbTracer :: !(Tracer m (TraceLedgerDBEvent blk)) + , ldbCfg :: !(LedgerDbCfg l) + , ldbHasFS :: !(SomeHasFS m) + , ldbResolveBlock :: !(ResolveBlock m blk) + , ldbQueryBatchSize :: !(Maybe Int) + , ldbReleaseLock :: !(AllowThunk (RAWLock m LDBLock)) + } deriving (Generic) + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + , NoThunks (LedgerCfg l) + ) => NoThunks (LedgerDBEnv m l blk) + +{------------------------------------------------------------------------------- + The LedgerDBHandle +-------------------------------------------------------------------------------} + +type LedgerDBHandle :: (Type -> Type) -> LedgerStateKind -> Type -> Type +newtype LedgerDBHandle m l blk = + LDBHandle (StrictTVar m (LedgerDBState m l blk)) + deriving Generic + +data LedgerDBState m l blk = + LedgerDBOpen !(LedgerDBEnv m l blk) + | LedgerDBClosed + deriving Generic + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + , NoThunks (LedgerCfg l) + ) => NoThunks (LedgerDBState m l blk) + + +-- | Check if the LedgerDB is open, if so, executing the given function on the +-- 'LedgerDBEnv', otherwise, throw a 'CloseDBError'. +getEnv :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> m r) + -> m r +getEnv (LDBHandle varState) f = readTVarIO varState >>= \case + LedgerDBOpen env -> f env + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + +-- | Variant 'of 'getEnv' for functions taking two arguments. +getEnv2 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> b -> m r) + -> a -> b -> m r +getEnv2 h f a b = getEnv h (\env -> f env a b) + +-- | Variant 'of 'getEnv' for functions taking five arguments. +getEnv5 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> b -> c -> d -> e -> m r) + -> a -> b -> c -> d -> e -> m r +getEnv5 h f a b c d e = getEnv h (\env -> f env a b c d e) + +-- | Variant of 'getEnv' that works in 'STM'. +getEnvSTM :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> STM m r) + -> STM m r +getEnvSTM (LDBHandle varState) f = readTVar varState >>= \case + LedgerDBOpen env -> f env + LedgerDBClosed -> throwSTM $ ClosedDBError @blk prettyCallStack + +-- | Variant of 'getEnv1' that works in 'STM'. +getEnvSTM1 :: + forall m l blk a r. 
(IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> (LedgerDBEnv m l blk -> a -> STM m r) + -> a -> STM m r +getEnvSTM1 (LDBHandle varState) f a = readTVar varState >>= \case + LedgerDBOpen env -> f env a + LedgerDBClosed -> throwSTM $ ClosedDBError @blk prettyCallStack + +{------------------------------------------------------------------------------- + Forker operations +-------------------------------------------------------------------------------} + +data ForkerEnv m l blk = ForkerEnv { + -- | Local version of the LedgerSeq + foeLedgerSeq :: !(StrictTVar m (LedgerSeq m l)) + -- | This TVar is the same as the LedgerDB one + , foeSwitchVar :: !(StrictTVar m (LedgerSeq m l)) + -- | Config + , foeSecurityParam :: !SecurityParam + -- | The batch size + , foeQueryBatchSize :: !(Maybe Int) + -- | Config + , foeTracer :: !(Tracer m TraceForkerEvent) + -- | Release the resources + , foeResourcesToRelease :: !(StrictTVar m [m ()]) + } + deriving Generic + +closeForkerEnv :: IOLike m => (LedgerDBEnv m l blk, ForkerEnv m l blk) -> m () +closeForkerEnv (LedgerDBEnv{ldbReleaseLock = AllowThunk lock}, frkEnv) = + RAWLock.withWriteAccess lock $ + const $ do + sequence_ =<< readTVarIO (foeResourcesToRelease frkEnv) + pure ((), LDBLock) + +deriving instance ( IOLike m + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , NoThunks (Key l) + , NoThunks (Value l) + ) => NoThunks (ForkerEnv m l blk) + +getForkerEnv :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> m r) + -> m r +getForkerEnv (LDBHandle varState) forkerKey f = do + forkerEnv <- atomically $ readTVar varState >>= \case + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + LedgerDBOpen env -> readTVar (ldbForkers env) >>= (Map.lookup forkerKey >>> \case + Nothing -> throwSTM $ ClosedForkerError @blk forkerKey prettyCallStack + Just forkerEnv -> pure forkerEnv) + f forkerEnv + +getForkerEnv1 :: + (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> a -> m r) + -> a -> m r +getForkerEnv1 h forkerKey f a = getForkerEnv h forkerKey (`f` a) + +getForkerEnvSTM :: + forall m l blk r. (IOLike m, HasCallStack, HasHeader blk) + => LedgerDBHandle m l blk + -> ForkerKey + -> (ForkerEnv m l blk -> STM m r) + -> STM m r +getForkerEnvSTM (LDBHandle varState) forkerKey f = readTVar varState >>= \case + LedgerDBClosed -> throwIO $ ClosedDBError @blk prettyCallStack + LedgerDBOpen env -> readTVar (ldbForkers env) >>= (Map.lookup forkerKey >>> \case + Nothing -> throwSTM $ ClosedForkerError @blk forkerKey prettyCallStack + Just forkerEnv -> f forkerEnv) + +newForker :: + ( IOLike m + , HasLedgerTables l + , LedgerSupportsProtocol blk + , NoThunks (l EmptyMK) + , GetTip l + , StandardHash l + ) + => LedgerDBHandle m l blk + -> LedgerDBEnv m l blk + -> ResourceRegistry m + -> StateRef m l + -> m (Forker m l blk) +newForker h ldbEnv rr st = do + forkerKey <- atomically $ stateTVar (ldbNextForkerKey ldbEnv) $ \r -> (r, r + 1) + let tr = LedgerDBForkerEvent . TraceForkerEventWithKey forkerKey >$< ldbTracer ldbEnv + traceWith tr ForkerOpen + lseqVar <- newTVarIO . LedgerSeq . 
AS.Empty $ st + (_, toRelease) <- allocate rr (\_ -> newTVarIO []) (readTVarIO >=> sequence_) + let forkerEnv = ForkerEnv { + foeLedgerSeq = lseqVar + , foeSwitchVar = ldbSeq ldbEnv + , foeSecurityParam = ledgerDbCfgSecParam $ ldbCfg ldbEnv + , foeQueryBatchSize = ldbQueryBatchSize ldbEnv + , foeTracer = tr + , foeResourcesToRelease = toRelease + } + atomically $ modifyTVar (ldbForkers ldbEnv) $ Map.insert forkerKey forkerEnv + pure $ Forker { + forkerReadTables = getForkerEnv1 h forkerKey implForkerReadTables + , forkerRangeReadTables = getForkerEnv1 h forkerKey implForkerRangeReadTables + , forkerGetLedgerState = getForkerEnvSTM h forkerKey implForkerGetLedgerState + , forkerReadStatistics = getForkerEnv h forkerKey implForkerReadStatistics + , forkerPush = getForkerEnv1 h forkerKey implForkerPush + , forkerCommit = getForkerEnvSTM h forkerKey implForkerCommit + , forkerClose = implForkerClose h forkerKey + } + +-- | Will release all handles in the 'foeLedgerSeq'. +implForkerClose :: + IOLike m + => LedgerDBHandle m l blk + -> ForkerKey + -> m () +implForkerClose (LDBHandle varState) forkerKey = do + menv <- atomically $ readTVar varState >>= \case + LedgerDBClosed -> pure Nothing + LedgerDBOpen ldbEnv -> fmap (ldbEnv,) <$> + stateTVar + (ldbForkers ldbEnv) + (Map.updateLookupWithKey (\_ _ -> Nothing) forkerKey) + whenJust menv closeForkerEnv + +implForkerReadTables :: + (MonadSTM m, GetTip l) + => ForkerEnv m l blk + -> LedgerTables l KeysMK + -> m (LedgerTables l ValuesMK) +implForkerReadTables env ks = do + traceWith (foeTracer env) ForkerReadTablesStart + lseq <- readTVarIO (foeLedgerSeq env) + tbs <- read (tables $ currentHandle lseq) ks + traceWith (foeTracer env) ForkerReadTablesEnd + pure tbs + +implForkerRangeReadTables :: + (MonadSTM m, GetTip l, HasLedgerTables l) + => ForkerEnv m l blk + -> RangeQueryPrevious l + -> m (LedgerTables l ValuesMK) +implForkerRangeReadTables env rq0 = do + traceWith (foeTracer env) ForkerRangeReadTablesStart + ldb <- readTVarIO $ foeLedgerSeq env + let n = maybe 100_000 id $ foeQueryBatchSize env + case rq0 of + NoPreviousQuery -> readRange (tables $ currentHandle ldb) (Nothing, n) + PreviousQueryWasFinal -> pure $ LedgerTables emptyMK + PreviousQueryWasUpTo k -> do + LedgerTables (ValuesMK m) <- readRange (tables $ currentHandle ldb) (Just k, n) + let tbs = LedgerTables $ ValuesMK $ snd $ Map.split k m + traceWith (foeTracer env) ForkerRangeReadTablesEnd + pure tbs + +implForkerGetLedgerState :: + (MonadSTM m, GetTip l) + => ForkerEnv m l blk + -> STM m (l EmptyMK) +implForkerGetLedgerState env = current <$> readTVar (foeLedgerSeq env) + +implForkerReadStatistics :: + (MonadSTM m, GetTip l) + => ForkerEnv m l blk + -> m (Maybe Statistics) +implForkerReadStatistics env = do + traceWith (foeTracer env) ForkerReadStatistics + fmap (fmap Statistics) . tablesSize . tables . 
currentHandle =<< readTVarIO (foeLedgerSeq env) + +implForkerPush :: + (IOLike m, GetTip l, HasLedgerTables l, HasCallStack) + => ForkerEnv m l blk + -> l DiffMK + -> m () +implForkerPush env newState = do + traceWith (foeTracer env) ForkerPushStart + lseq <- readTVarIO (foeLedgerSeq env) + let (st, tbs) = (forgetLedgerTables newState, ltprj newState) + + bracketOnError + (duplicate (tables $ currentHandle lseq)) + close + (\newtbs -> do + write newtbs tbs + + let lseq' = extend (StateRef st newtbs) lseq + + traceWith (foeTracer env) ForkerPushEnd + atomically $ do + writeTVar (foeLedgerSeq env) lseq' + modifyTVar (foeResourcesToRelease env) (close newtbs :) + ) + +implForkerCommit :: + (IOLike m, GetTip l, StandardHash l) + => ForkerEnv m l blk + -> STM m () +implForkerCommit env = do + LedgerSeq lseq <- readTVar foeLedgerSeq + let intersectionSlot = getTipSlot $ state $ AS.anchor lseq + let predicate = (== getTipHash (state (AS.anchor lseq))) . getTipHash . state + (statesToClose, LedgerSeq statesDiscarded) <- do + stateTVar + foeSwitchVar + (\(LedgerSeq olddb) -> fromMaybe theImpossible $ do + (olddb', toClose) <- AS.splitAfterMeasure intersectionSlot (either predicate predicate) olddb + newdb <- AS.join (const $ const True) olddb' lseq + let (l, s) = prune (foeSecurityParam env) (LedgerSeq newdb) + pure ((toClose, l), s) + ) + + writeTVar foeResourcesToRelease $ + map (close . tables) $ AS.toOldestFirst statesToClose ++ AS.toOldestFirst statesDiscarded + + where + ForkerEnv { + foeLedgerSeq + , foeSwitchVar + , foeResourcesToRelease + } = env + + theImpossible = + error $ unwords [ "Critical invariant violation:" + , "Forker chain does no longer intersect with selected chain." + ] + +{------------------------------------------------------------------------------- + Acquiring consistent views +-------------------------------------------------------------------------------} + +-- | This function must hold the 'LDBLock' such that handles are not released +-- before they are duplicated. +acquireAtWellKnownPoint :: + (IOLike m, GetTip l, StandardHash blk) + => LedgerDBEnv m l blk + -> Target (Point blk) + -> LDBLock + -> m (StateRef m l) +acquireAtWellKnownPoint ldbEnv VolatileTip _ = do + l <- readTVarIO (ldbSeq ldbEnv) + let StateRef st tbs = currentHandle l + t <- duplicate tbs + pure (StateRef st t) +acquireAtWellKnownPoint ldbEnv ImmutableTip _ = do + l <- readTVarIO (ldbSeq ldbEnv) + let StateRef st tbs = anchorHandle l + t <- duplicate tbs + pure (StateRef st t) +acquireAtWellKnownPoint _ (SpecificPoint pt) _ = + error $ "calling acquireAtWellKnownPoint for a not well-known point: " <> show pt + +-- | This function must hold the 'LDBLock' such that handles are not released +-- before they are duplicated. +acquireAtPoint :: + forall m l blk. ( + HeaderHash l ~ HeaderHash blk + , IOLike m + , IsLedger l + , StandardHash l + , LedgerSupportsProtocol blk + ) + => LedgerDBEnv m l blk + -> Point blk + -> LDBLock + -> m (Either GetForkerError (StateRef m l)) +acquireAtPoint ldbEnv pt _ = do + dblog <- readTVarIO (ldbSeq ldbEnv) + let immTip = getTip $ anchor dblog + case currentHandle <$> rollback pt dblog of + Nothing | pointSlot pt < pointSlot immTip -> pure $ Left PointTooOld + | otherwise -> pure $ Left PointNotOnChain + Just (StateRef st tbs) -> + Right . StateRef st <$> duplicate tbs + +-- | This function must hold the 'LDBLock' such that handles are not released +-- before they are duplicated. +acquireAtFromTip :: + forall m l blk. 
( + IOLike m + , IsLedger l + ) + => LedgerDBEnv m l blk + -> Word64 + -> LDBLock + -> m (Either ExceededRollback (StateRef m l)) +acquireAtFromTip ldbEnv n _ = do + dblog <- readTVarIO (ldbSeq ldbEnv) + case currentHandle <$> rollbackN n dblog of + Nothing -> + return $ Left $ ExceededRollback { + rollbackMaximum = maxRollback dblog + , rollbackRequested = n + } + Just (StateRef st tbs) -> + Right . StateRef st <$> duplicate tbs + +newForkerAtWellKnownPoint :: + ( IOLike m + , IsLedger l + , HasLedgerTables l + , LedgerSupportsProtocol blk + , StandardHash l + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> Target (Point blk) + -> m (Forker m l blk) +newForkerAtWellKnownPoint h rr pt = getEnv h $ \ldbEnv@LedgerDBEnv{ldbReleaseLock = AllowThunk lock} -> do + RAWLock.withReadAccess lock (acquireAtWellKnownPoint ldbEnv pt) >>= newForker h ldbEnv rr + +newForkerAtPoint :: + ( HeaderHash l ~ HeaderHash blk + , IOLike m + , IsLedger l + , StandardHash l + , HasLedgerTables l + , LedgerSupportsProtocol blk + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> Point blk + -> m (Either GetForkerError (Forker m l blk)) +newForkerAtPoint h rr pt = getEnv h $ \ldbEnv@LedgerDBEnv{ldbReleaseLock = AllowThunk lock} -> do + RAWLock.withReadAccess lock (acquireAtPoint ldbEnv pt) >>= traverse (newForker h ldbEnv rr) + +newForkerAtFromTip :: + ( IOLike m + , IsLedger l + , HasLedgerTables l + , LedgerSupportsProtocol blk + , StandardHash l + ) + => LedgerDBHandle m l blk + -> ResourceRegistry m + -> Word64 + -> m (Either ExceededRollback (Forker m l blk)) +newForkerAtFromTip h rr n = getEnv h $ \ldbEnv@LedgerDBEnv{ldbReleaseLock = AllowThunk lock} -> do + RAWLock.withReadAccess lock (acquireAtFromTip ldbEnv n) >>= traverse (newForker h ldbEnv rr) + +-- | Close all open 'Forker's. 
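-- Closing a forker releases the resources it had registered (the duplicated
-- ledger-table handles); see 'closeForkerEnv', which does so holding write
-- access on the RAW lock so that handles are not released while another
-- thread is still duplicating them.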
+closeAllForkers :: + IOLike m + => LedgerDBEnv m l blk + -> m () +closeAllForkers ldbEnv = do + toClose <- fmap (ldbEnv,) <$> (atomically $ stateTVar forkersVar (, Map.empty)) + mapM_ closeForkerEnv toClose + where + forkersVar = ldbForkers ldbEnv diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/InMemory.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/InMemory.hs new file mode 100644 index 0000000000..3459409032 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/InMemory.hs @@ -0,0 +1,202 @@ +{-# LANGUAGE BangPatterns #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE EmptyDataDeriving #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} +{-# LANGUAGE ViewPatterns #-} + +module Ouroboros.Consensus.Storage.LedgerDB.V2.InMemory ( + -- * LedgerTablesHandle + newInMemoryLedgerTablesHandle + -- * Snapshots + , loadSnapshot + , snapshotToStatePath + , snapshotToTablePath + , takeSnapshot + ) where + +import Cardano.Binary as CBOR +import qualified Codec.CBOR.Read as CBOR +import qualified Codec.CBOR.Write as CBOR +import Codec.Serialise (decode) +import Control.Monad (unless, void) +import Control.Monad.Except (runExceptT) +import Control.ResourceRegistry +import Control.Tracer +import qualified Data.ByteString.Lazy as BSL +import qualified Data.List as List +import qualified Data.Map.Strict as Map +import Data.Maybe +import Data.String (fromString) +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq +import Ouroboros.Consensus.Util.IOLike +import Prelude hiding (read) +import System.FS.API +import System.FS.API.Lazy + + +{------------------------------------------------------------------------------- + InMemory implementation of LedgerTablesHandles +-------------------------------------------------------------------------------} + +data LedgerTablesHandleState l = + LedgerTablesHandleOpen !(LedgerTables l ValuesMK) + | LedgerTablesHandleClosed + deriving Generic + +deriving instance NoThunks (LedgerTables l ValuesMK) => NoThunks (LedgerTablesHandleState l) + +data InMemoryClosedExn = InMemoryClosedExn + deriving (Show, Exception) + +guardClosed :: LedgerTablesHandleState l -> (LedgerTables l ValuesMK -> a) -> a +guardClosed LedgerTablesHandleClosed _ = error $ show InMemoryClosedExn +guardClosed (LedgerTablesHandleOpen st) f = f st + +newInMemoryLedgerTablesHandle :: + ( IOLike m + , HasLedgerTables l + , CanSerializeLedgerTables l + ) + => SomeHasFS m + -> LedgerTables l ValuesMK + -> m (LedgerTablesHandle m l) +newInMemoryLedgerTablesHandle someFS@(SomeHasFS hasFS) l = do + !tv <- newTVarIO 
(LedgerTablesHandleOpen l) + pure LedgerTablesHandle { + close = + atomically $ modifyTVar tv (\_ -> LedgerTablesHandleClosed) + , duplicate = do + hs <- readTVarIO tv + !x <- guardClosed hs $ newInMemoryLedgerTablesHandle someFS + pure x + , read = \keys -> do + hs <- readTVarIO tv + guardClosed hs (\st -> pure $ ltliftA2 rawRestrictValues st keys) + , readRange = \(f, t) -> do + hs <- readTVarIO tv + guardClosed hs (\(LedgerTables (ValuesMK m)) -> + pure . LedgerTables . ValuesMK . Map.take t . (maybe id (\g -> snd . Map.split g) f) $ m) + , write = \(!diffs) -> + atomically + $ modifyTVar tv + (\r -> guardClosed r (\st -> LedgerTablesHandleOpen (ltliftA2 rawApplyDiffs st diffs))) + , writeToDisk = \snapshotName -> do + createDirectoryIfMissing hasFS True $ mkFsPath [snapshotName, "tables"] + h <- readTVarIO tv + guardClosed h $ + \values -> + withFile hasFS (mkFsPath [snapshotName, "tables", "tvar"]) (WriteMode MustBeNew) $ \hf -> + void $ hPutAll hasFS hf + $ CBOR.toLazyByteString + $ valuesMKEncoder values + , tablesSize = do + hs <- readTVarIO tv + guardClosed hs (\(getLedgerTables -> ValuesMK m) -> pure $ Just $ Map.size m) + , isOpen = do + hs <- readTVarIO tv + case hs of + LedgerTablesHandleOpen{} -> pure True + LedgerTablesHandleClosed{} -> pure False + } + +{------------------------------------------------------------------------------- + Snapshots +-------------------------------------------------------------------------------} + +-- | The path within the LedgerDB's filesystem to the file that contains the +-- snapshot's serialized ledger state +snapshotToStatePath :: DiskSnapshot -> FsPath +snapshotToStatePath = mkFsPath . (\x -> [x, "state"]) . snapshotToDirName + +snapshotToTablePath :: DiskSnapshot -> FsPath +snapshotToTablePath = mkFsPath . (\x -> [x, "tables", "tvar"]) . snapshotToDirName + +writeSnapshot :: + MonadThrow m + => SomeHasFS m + -> (ExtLedgerState blk EmptyMK -> Encoding) + -> DiskSnapshot + -> StateRef m (ExtLedgerState blk) + -> m () +writeSnapshot fs@(SomeHasFS hasFs) encLedger ds st = do + createDirectoryIfMissing hasFs True $ snapshotToDirPath ds + writeExtLedgerState fs encLedger (snapshotToStatePath ds) $ state st + writeToDisk (tables st) $ snapshotToDirName ds + +takeSnapshot :: + ( MonadThrow m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + ) + => CodecConfig blk + -> Tracer m (TraceSnapshotEvent blk) + -> SomeHasFS m + -> Maybe DiskSnapshot + -> StateRef m (ExtLedgerState blk) + -> m (Maybe (DiskSnapshot, RealPoint blk)) +takeSnapshot ccfg tracer hasFS dsOverride st = do + case pointToWithOriginRealPoint (castPoint (getTip $ state st)) of + Origin -> return Nothing + NotOrigin t -> do + let number = unSlotNo (realPointSlot t) + snapshot = fromMaybe (DiskSnapshot number Nothing) dsOverride + diskSnapshots <- listSnapshots hasFS + if List.any ((== number) . 
dsNumber) diskSnapshots then + return Nothing + else do + writeSnapshot hasFS (encodeDiskExtLedgerState ccfg) snapshot st + traceWith tracer $ TookSnapshot snapshot t + return $ Just (snapshot, t) + +loadSnapshot :: + ( LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , IOLike m + ) + => ResourceRegistry m + -> CodecConfig blk + -> SomeHasFS m + -> DiskSnapshot + -> m (Either (SnapshotFailure blk) (LedgerSeq' m blk, RealPoint blk)) +loadSnapshot _rr ccfg fs@(SomeHasFS hasFS) ds = do + eExtLedgerSt <- runExceptT $ readExtLedgerState fs (decodeDiskExtLedgerState ccfg) decode (snapshotToStatePath ds) + case eExtLedgerSt of + Left err -> pure (Left $ InitFailureRead err) + Right extLedgerSt -> do + traceMarkerIO "Loaded state" + case pointToWithOriginRealPoint (castPoint (getTip extLedgerSt)) of + Origin -> pure (Left InitFailureGenesis) + NotOrigin pt -> do + values <- withFile hasFS ( fsPathFromList + $ fsPathToList (snapshotToDirPath ds) + <> [fromString "tables", fromString "tvar"]) ReadMode $ \h -> do + bs <- hGetAll hasFS h + case CBOR.deserialiseFromBytes valuesMKDecoder bs of + Left err -> error $ show err + Right (extra, x) -> do + unless (BSL.null extra) $ error "Trailing bytes in snapshot" + pure x + Right . (,pt) <$> empty extLedgerSt values (newInMemoryLedgerTablesHandle fs) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs new file mode 100644 index 0000000000..b2b7cdc972 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs @@ -0,0 +1,377 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE TypeOperators #-} + +#if __GLASGOW_HASKELL__ <= 906 +{-# OPTIONS_GHC -Wno-redundant-constraints #-} +#endif + +module Ouroboros.Consensus.Storage.LedgerDB.V2.Init (mkInitDb) where + +import Control.Monad (void) +import Control.Monad.Base +import qualified Control.RAWLock as RAWLock +import Control.ResourceRegistry +import Control.Tracer +#if __GLASGOW_HASKELL__ < 910 +import Data.Foldable +#endif +import Data.Functor.Contravariant ((>$<)) +import qualified Data.Map.Strict as Map +import Data.Maybe (isJust) +import Data.Set (Set) +import qualified Data.Set as Set +import Data.Word +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.HardFork.Abstract +import Ouroboros.Consensus.HeaderStateHistory + (HeaderStateHistory (..), mkHeaderStateWithTimeFromSummary) +import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache +import Ouroboros.Consensus.Storage.LedgerDB.API +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Init +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Validate as Validate +import Ouroboros.Consensus.Storage.LedgerDB.V2.Args as V2 +import 
Ouroboros.Consensus.Storage.LedgerDB.V2.Common +import qualified Ouroboros.Consensus.Storage.LedgerDB.V2.InMemory as InMemory +import Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq +import Ouroboros.Consensus.Util +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.CallStack +import Ouroboros.Consensus.Util.IOLike +import qualified Ouroboros.Network.AnchoredSeq as AS +import Ouroboros.Network.Protocol.LocalStateQuery.Type +import System.FS.API + +mkInitDb :: forall m blk. + ( LedgerSupportsProtocol blk + , IOLike m + , MonadBase m m + , LedgerDbSerialiseConstraints blk + , HasHardForkHistory blk +#if __GLASGOW_HASKELL__ < 910 + , HasAnnTip blk +#endif + ) + => Complete LedgerDbArgs m blk + -> Complete V2.LedgerDbFlavorArgs m + -> Validate.ResolveBlock m blk + -> InitDB (LedgerSeq' m blk) m blk +mkInitDb args flavArgs getBlock = + InitDB { + initFromGenesis = emptyF =<< lgrGenesis + , initFromSnapshot = \ds -> do + traceMarkerIO "Loading snapshot" + s <- loadSnapshot (configCodec . getExtLedgerCfg . ledgerDbCfg $ lgrConfig) lgrHasFS ds + traceMarkerIO "Loaded snapshot" + pure s + , closeDb = closeLedgerSeq + , initReapplyBlock = \a b c -> do + (LedgerSeq x, y) <- reapplyThenPush lgrRegistry a b c + mapM_ (close . tables) (AS.toOldestFirst x) + pure y + , currentTip = ledgerState . current + , mkLedgerDb = \lseq -> do + traceMarkerIO "Initialize LedgerDB" + let (LedgerSeq rel, dbPrunedToImmDBTip) = pruneToImmTipOnly lseq + mapM_ (close . tables) (AS.toOldestFirst rel) + (varDB, prevApplied) <- + (,) <$> newTVarIO dbPrunedToImmDBTip <*> newTVarIO Set.empty + forkers <- newTVarIO Map.empty + nextForkerKey <- newTVarIO (ForkerKey 0) + lock <- RAWLock.new LDBLock + let env = LedgerDBEnv { + ldbSeq = varDB + , ldbPrevApplied = prevApplied + , ldbForkers = forkers + , ldbNextForkerKey = nextForkerKey + , ldbSnapshotPolicy = defaultSnapshotPolicy (ledgerDbCfgSecParam lgrConfig) lgrSnapshotPolicyArgs + , ldbTracer = lgrTracer + , ldbCfg = lgrConfig + , ldbHasFS = lgrHasFS + , ldbResolveBlock = getBlock + , ldbQueryBatchSize = Nothing + , ldbReleaseLock = AllowThunk lock + } + h <- LDBHandle <$> newTVarIO (LedgerDBOpen env) + pure $ implMkLedgerDb h bss + } + where + LedgerDbArgs { + lgrConfig + , lgrGenesis + , lgrHasFS + , lgrSnapshotPolicyArgs + , lgrTracer + , lgrRegistry + } = args + + bss = case flavArgs of V2Args bss0 -> bss0 + + emptyF :: ExtLedgerState blk ValuesMK + -> m (LedgerSeq' m blk) + emptyF st = + empty' st $ case bss of + InMemoryHandleArgs -> InMemory.newInMemoryLedgerTablesHandle lgrHasFS + --TODO LSMHandleArgs -> LSM.newLSMLedgerTablesHandle + + loadSnapshot :: CodecConfig blk + -> SomeHasFS m + -> DiskSnapshot + -> m (Either (SnapshotFailure blk) (LedgerSeq' m blk, RealPoint blk)) + loadSnapshot = case bss of + InMemoryHandleArgs -> InMemory.loadSnapshot lgrRegistry + --TODO LSMHandleArgs -> LSM.loadSnapshot + +implMkLedgerDb :: + forall m l blk. 
+ ( IOLike m + , HasCallStack + , IsLedger l + , l ~ ExtLedgerState blk + , StandardHash l, HasLedgerTables l +#if __GLASGOW_HASKELL__ < 908 + , HeaderHash l ~ HeaderHash blk +#endif + , LedgerSupportsProtocol blk + , LedgerDbSerialiseConstraints blk + , MonadBase m m + , HasHardForkHistory blk + ) + => LedgerDBHandle m l blk + -> HandleArgs + -> (LedgerDB m l blk, TestInternals m l blk) +implMkLedgerDb h bss = (LedgerDB { + getVolatileTip = getEnvSTM h implGetVolatileTip + , getImmutableTip = getEnvSTM h implGetImmutableTip + , getPastLedgerState = getEnvSTM1 h implGetPastLedgerState + , getHeaderStateHistory = getEnvSTM h implGetHeaderStateHistory + , getForkerAtWellKnownPoint = newForkerAtWellKnownPoint h + , getForkerAtPoint = newForkerAtPoint h + , validate = getEnv5 h (implValidate h) + , getPrevApplied = getEnvSTM h implGetPrevApplied + , garbageCollect = getEnvSTM1 h implGarbageCollect + , tryTakeSnapshot = getEnv2 h (implTryTakeSnapshot bss) + , tryFlush = getEnv h implTryFlush + , closeDB = implCloseDB h + }, mkInternals bss h) + +mkInternals :: + forall m blk. ( IOLike m + , LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , ApplyBlock (ExtLedgerState blk) blk +#if __GLASGOW_HASKELL__ > 810 + , MonadBase m m +#endif + ) + => HandleArgs + -> LedgerDBHandle m (ExtLedgerState blk) blk + -> TestInternals' m blk +mkInternals bss h = TestInternals { + takeSnapshotNOW = \ds -> getEnv h $ \env -> do + void . takeSnapshot + (configCodec . getExtLedgerCfg . ledgerDbCfg $ ldbCfg env) + (LedgerDBSnapshotEvent >$< ldbTracer env) + (ldbHasFS env) + ds + . anchorHandle + =<< readTVarIO (ldbSeq env) + , reapplyThenPushNOW = \blk -> getEnv h $ \env -> withRegistry $ \reg -> do + frk <- newForkerAtWellKnownPoint h reg VolatileTip + st <- atomically $ forkerGetLedgerState frk + tables <- forkerReadTables frk (getBlockKeySets blk) + let st' = tickThenReapply (ledgerDbCfg $ ldbCfg env) blk (st `withLedgerTables` tables) + forkerPush frk st' >> atomically (forkerCommit frk) >> forkerClose frk + , wipeLedgerDB = getEnv h $ destroySnapshots . ldbHasFS + , closeLedgerDB = + let LDBHandle tvar = h in + atomically (modifyTVar tvar (const LedgerDBClosed)) + , truncateSnapshots = getEnv h $ implIntTruncateSnapshots . ldbHasFS + } + where + takeSnapshot :: CodecConfig blk + -> Tracer m (TraceSnapshotEvent blk) + -> SomeHasFS m + -> Maybe DiskSnapshot + -> StateRef m (ExtLedgerState blk) + -> m (Maybe (DiskSnapshot, RealPoint blk)) + takeSnapshot = case bss of + InMemoryHandleArgs -> InMemory.takeSnapshot + --TODO LSMHandleArgs -> LSM.takeSnapshot + +-- | Testing only! Destroy all snapshots in the DB. +destroySnapshots :: Monad m => SomeHasFS m -> m () +destroySnapshots (SomeHasFS fs) = do + dirs <- Set.lookupMax . Set.filter (isJust . snapshotFromPath) <$> listDirectory fs (mkFsPath []) + mapM_ ((\d -> do + isDir <- doesDirectoryExist fs d + if isDir + then removeDirectoryRecursive fs d + else removeFile fs d + ) . mkFsPath . (:[])) dirs + +-- | Testing only! Truncate all snapshots in the DB. +implIntTruncateSnapshots :: MonadThrow m => SomeHasFS m -> m () +implIntTruncateSnapshots (SomeHasFS fs) = do + dirs <- Set.lookupMax . Set.filter (isJust . snapshotFromPath) <$> listDirectory fs (mkFsPath []) + mapM_ (truncateRecursively . 
(:[])) dirs + where + truncateRecursively pre = do + dirs <- listDirectory fs (mkFsPath pre) + mapM_ (\d -> do + let d' = pre ++ [d] + isDir <- doesDirectoryExist fs $ mkFsPath d' + if isDir + then truncateRecursively d' + else withFile fs (mkFsPath d') (AppendMode AllowExisting) $ \h -> hTruncate fs h 0 + ) dirs + +implGetVolatileTip :: + (MonadSTM m, GetTip l) + => LedgerDBEnv m l blk + -> STM m (l EmptyMK) +implGetVolatileTip = fmap current . readTVar . ldbSeq + +implGetImmutableTip :: + MonadSTM m + => LedgerDBEnv m l blk + -> STM m (l EmptyMK) +implGetImmutableTip = fmap anchor . readTVar . ldbSeq + +implGetPastLedgerState :: + ( MonadSTM m , HasHeader blk, IsLedger l, StandardHash l + , HeaderHash l ~ HeaderHash blk ) + => LedgerDBEnv m l blk -> Point blk -> STM m (Maybe (l EmptyMK)) +implGetPastLedgerState env point = getPastLedgerAt point <$> readTVar (ldbSeq env) + +implGetHeaderStateHistory :: + ( MonadSTM m + , l ~ ExtLedgerState blk + , IsLedger (LedgerState blk) + , HasHardForkHistory blk + , HasAnnTip blk + ) + => LedgerDBEnv m l blk -> STM m (HeaderStateHistory blk) +implGetHeaderStateHistory env = do + ldb <- readTVar (ldbSeq env) + let currentLedgerState = ledgerState $ current ldb + -- This summary can convert all tip slots of the ledger states in the + -- @ledgerDb@ as these are not newer than the tip slot of the current + -- ledger state (Property 17.1 in the Consensus report). + summary = hardForkSummary (configLedger $ getExtLedgerCfg $ ledgerDbCfg $ ldbCfg env) currentLedgerState + mkHeaderStateWithTime' = + mkHeaderStateWithTimeFromSummary summary + . headerState + . state + pure + . HeaderStateHistory + . AS.bimap mkHeaderStateWithTime' mkHeaderStateWithTime' + $ getLedgerSeq ldb + +implValidate :: + forall m l blk. ( + IOLike m + , LedgerSupportsProtocol blk + , HasCallStack + , l ~ ExtLedgerState blk + , MonadBase m m + ) + => LedgerDBHandle m l blk + -> LedgerDBEnv m l blk + -> ResourceRegistry m + -> (TraceValidateEvent blk -> m ()) + -> BlockCache blk + -> Word64 + -> [Header blk] + -> m (ValidateResult m (ExtLedgerState blk) blk) +implValidate h ldbEnv = + Validate.validate + (ldbResolveBlock ldbEnv) + (getExtLedgerCfg . ledgerDbCfg $ ldbCfg ldbEnv) + (\l -> do + prev <- readTVar (ldbPrevApplied ldbEnv) + writeTVar (ldbPrevApplied ldbEnv) (foldl' (flip Set.insert) prev l)) + (readTVar (ldbPrevApplied ldbEnv)) + (newForkerAtFromTip h) + +implGetPrevApplied :: MonadSTM m => LedgerDBEnv m l blk -> STM m (Set (RealPoint blk)) +implGetPrevApplied env = readTVar (ldbPrevApplied env) + +-- | Remove all points with a slot older than the given slot from the set of +-- previously applied points. +implGarbageCollect :: MonadSTM m => LedgerDBEnv m l blk -> SlotNo -> STM m () +implGarbageCollect env slotNo = modifyTVar (ldbPrevApplied env) $ + Set.dropWhileAntitone ((< slotNo) . realPointSlot) + +implTryTakeSnapshot :: + forall m l blk. + ( l ~ ExtLedgerState blk + , IOLike m + , LedgerSupportsProtocol blk + , LedgerDbSerialiseConstraints blk + ) + => HandleArgs + -> LedgerDBEnv m l blk + -> Maybe (Time, Time) + -> Word64 + -> m SnapCounters +implTryTakeSnapshot bss env mTime nrBlocks = + if onDiskShouldTakeSnapshot (ldbSnapshotPolicy env) (uncurry (flip diffTime) <$> mTime) nrBlocks then do + void . takeSnapshot + (configCodec . getExtLedgerCfg . ledgerDbCfg $ ldbCfg env) + (LedgerDBSnapshotEvent >$< ldbTracer env) + (ldbHasFS env) + . 
anchorHandle + =<< readTVarIO (ldbSeq env) + void $ trimSnapshots + (LedgerDBSnapshotEvent >$< ldbTracer env) + (ldbHasFS env) + (ldbSnapshotPolicy env) + (`SnapCounters` 0) . Just <$> maybe getMonotonicTime (pure . snd) mTime + else + pure $ SnapCounters (fst <$> mTime) nrBlocks + where + takeSnapshot :: CodecConfig blk + -> Tracer m (TraceSnapshotEvent blk) + -> SomeHasFS m + -> StateRef m (ExtLedgerState blk) + -> m (Maybe (DiskSnapshot, RealPoint blk)) + takeSnapshot config trcr fs ref = case bss of + InMemoryHandleArgs -> InMemory.takeSnapshot config trcr fs Nothing ref + --TODO LSMHandleArgs -> LSM.takeSnapshot config trcr fs Nothing ref + +-- In the first version of the LedgerDB for UTxO-HD, there is a need to +-- periodically flush the accumulated differences to the disk. However, in the +-- second version there is no need to do so, and because of that, this function +-- does nothing in this case. +implTryFlush :: Applicative m => LedgerDBEnv m l blk -> m () +implTryFlush _ = pure () + +implCloseDB :: IOLike m => LedgerDBHandle m l blk -> m () +implCloseDB (LDBHandle varState) = do + mbOpenEnv <- atomically $ readTVar varState >>= \case + -- Idempotent + LedgerDBClosed -> return Nothing + LedgerDBOpen env -> do + writeTVar varState LedgerDBClosed + return $ Just env + + -- Only when the LedgerDB was open + whenJust mbOpenEnv $ \env -> do + closeAllForkers env diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LSM.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LSM.hs new file mode 100644 index 0000000000..6ea1e32e87 --- /dev/null +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LSM.hs @@ -0,0 +1,53 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE EmptyDataDeriving #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE TypeFamilies #-} + +{-# OPTIONS_GHC -Wno-redundant-constraints -Wno-orphans #-} + +-- | TODO This whole file has to be implemented once we have LSM +module Ouroboros.Consensus.Storage.LedgerDB.V2.LSM ( + loadSnapshot + , newLSMLedgerTablesHandle + , takeSnapshot + ) where + +import Control.Tracer +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Common +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq +import Ouroboros.Consensus.Util.IOLike +import System.FS.API + +newLSMLedgerTablesHandle :: + ( IOLike m + , HasLedgerTables l + , CanSerializeLedgerTables l + ) + => LedgerTables l ValuesMK + -> m (LedgerTablesHandle m l) +newLSMLedgerTablesHandle = undefined + +loadSnapshot :: + ( LedgerDbSerialiseConstraints blk + , LedgerSupportsProtocol blk + , IOLike m + ) + => CodecConfig blk + -> SomeHasFS m + -> DiskSnapshot + -> m (Either (SnapshotFailure blk) (LedgerSeq' m blk, RealPoint blk)) +loadSnapshot = undefined + +takeSnapshot :: + CodecConfig blk + -> Tracer m (TraceSnapshotEvent blk) + -> SomeHasFS m + -> Maybe DiskSnapshot + -> StateRef m (ExtLedgerState blk) + -> m (Maybe (DiskSnapshot, RealPoint blk)) +takeSnapshot = undefined diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LedgerSeq.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LedgerSeq.hs new file mode 100644 index 0000000000..272bca29e8 --- /dev/null +++ 
b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/LedgerSeq.hs @@ -0,0 +1,485 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE QuantifiedConstraints #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +-- | The data structure that holds the cached ledger states. +module Ouroboros.Consensus.Storage.LedgerDB.V2.LedgerSeq ( + -- * LedgerHandles + LedgerTablesHandle (..) + -- * The ledger seq + , LedgerSeq (..) + , LedgerSeq' + , StateRef (..) + , closeLedgerSeq + , empty + , empty' + -- * Apply Blocks + , extend + , prune + , pruneToImmTipOnly + , reapplyBlock + , reapplyThenPush + -- * Queries + , anchor + , anchorHandle + , current + , currentHandle + , getPastLedgerAt + , immutableTipSlot + , isSaturated + , maxRollback + , rollback + , rollbackN + , rollbackToAnchor + , rollbackToPoint + , snapshots + , tip + , volatileStatesBimap + ) where + +import Control.ResourceRegistry +import qualified Data.Bifunctor as B +import Data.Function (on) +import Data.Word +import GHC.Generics +import NoThunks.Class +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config.SecurityParam +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.AnchoredSeq hiding (anchor, last, map, + rollback) +import qualified Ouroboros.Network.AnchoredSeq as AS hiding (map) +import Prelude hiding (read) + +{------------------------------------------------------------------------------- + LedgerTablesHandles +-------------------------------------------------------------------------------} + +data LedgerTablesHandle m l = LedgerTablesHandle { + close :: m () + , duplicate :: m (LedgerTablesHandle m l) + , read :: LedgerTables l KeysMK -> m (LedgerTables l ValuesMK) + , readRange :: (Maybe (Key l), Int) -> m (LedgerTables l ValuesMK) + , write :: LedgerTables l DiffMK -> m () + , writeToDisk :: String -> m () + , tablesSize :: m (Maybe Int) + , isOpen :: m Bool + } + deriving NoThunks via OnlyCheckWhnfNamed "LedgerTablesHandle" (LedgerTablesHandle m l) + +{------------------------------------------------------------------------------- + StateRef, represents a full virtual ledger state +-------------------------------------------------------------------------------} + +-- | For unary blocks, it would be the same to hold a stowed ledger state, an +-- unstowed one or a tuple with the state and the tables, however, for a n-ary +-- block, these are not equivalent. +-- +-- If we were to hold a sequence of type @LedgerState blk EmptyMK@ with stowed +-- values, we would have to translate the entirety of the tables on epoch +-- boundaries. +-- +-- If we were to hold a sequence of type @LedgerState blk ValuesMK@ we would +-- have the same problem as the @mk@ in the state actually refers to the @mk@ in +-- the @HardForkState@'ed state. 
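+-- In other words, the values would again live inside the era-specific state,
+-- so they would still have to be translated when crossing an era boundary,
+-- which is exactly the problem described above.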
+--
+-- Therefore it sounds reasonable to hold a @LedgerState blk EmptyMK@ with no
+-- values, and a @LedgerTables blk ValuesMK@ next to it, that will live its
+-- entire lifetime as @LedgerTables@ of the @HardForkBlock@.
+data StateRef m l = StateRef {
+    state  :: !(l EmptyMK)
+  , tables :: !(LedgerTablesHandle m l)
+  } deriving (Generic)
+
+deriving instance (IOLike m, NoThunks (l EmptyMK)) => NoThunks (StateRef m l)
+
+instance Eq (l EmptyMK) => Eq (StateRef m l) where
+  (==) = (==) `on` state
+
+instance Show (l EmptyMK) => Show (StateRef m l) where
+  show = show . state
+
+instance GetTip l => Anchorable (WithOrigin SlotNo) (StateRef m l) (StateRef m l) where
+  asAnchor = id
+  getAnchorMeasure _ = getTipSlot . state
+
+{-------------------------------------------------------------------------------
+  The LedgerSeq
+-------------------------------------------------------------------------------}
+
+newtype LedgerSeq m l = LedgerSeq {
+    getLedgerSeq :: AnchoredSeq (WithOrigin SlotNo) (StateRef m l) (StateRef m l)
+  } deriving (Generic)
+
+deriving newtype instance (IOLike m, NoThunks (l EmptyMK)) => NoThunks (LedgerSeq m l)
+
+deriving newtype instance Eq (l EmptyMK) => Eq (LedgerSeq m l)
+deriving newtype instance Show (l EmptyMK) => Show (LedgerSeq m l)
+
+type LedgerSeq' m blk = LedgerSeq m (ExtLedgerState blk)
+
+{-------------------------------------------------------------------------------
+  Construction
+-------------------------------------------------------------------------------}
+
+-- | Creates an empty @LedgerSeq@
+empty ::
+     ( GetTip l
+     , IOLike m
+     )
+  => l EmptyMK
+  -> LedgerTables l ValuesMK
+  -> (LedgerTables l ValuesMK -> m ( LedgerTablesHandle m l))
+  -> m (LedgerSeq m l)
+empty st tbs new = LedgerSeq . AS.Empty . StateRef st <$> new tbs
+
+-- | Creates an empty @LedgerSeq@ from a ledger state that still holds its
+-- values in memory.
+empty' ::
+     ( GetTip l
+     , IOLike m
+     , HasLedgerTables l
+     )
+  => l ValuesMK
+  -> (LedgerTables l ValuesMK -> m (LedgerTablesHandle m l))
+  -> m (LedgerSeq m l)
+empty' st = empty (forgetLedgerTables st) (ltprj st)
+
+closeLedgerSeq :: Monad m => LedgerSeq m l -> m ()
+closeLedgerSeq = mapM_ (close . tables) . toOldestFirst . getLedgerSeq
+
+{-------------------------------------------------------------------------------
+  Apply blocks
+-------------------------------------------------------------------------------}
+
+-- | If applying a block on top of the ledger state at the tip is successful,
+-- extend the LedgerSeq with the resulting ledger state.
+--
+-- Note that we require @c@ (from the particular choice of @Ap m l blk c@) so
+-- this sometimes can throw ledger errors.
+reapplyThenPush :: (IOLike m, ApplyBlock l blk)
+                => ResourceRegistry m
+                -> LedgerDbCfg l
+                -> blk
+                -> LedgerSeq m l
+                -> m (LedgerSeq m l, LedgerSeq m l)
+reapplyThenPush rr cfg ap db =
+    (\current' -> prune (ledgerDbCfgSecParam cfg) $ extend current' db) <$>
+      reapplyBlock (ledgerDbCfg cfg) ap rr db
+
+reapplyBlock :: forall m l blk. (ApplyBlock l blk, IOLike m)
+             => LedgerCfg l
+             -> blk
+             -> ResourceRegistry m
+             -> LedgerSeq m l
+             -> m (StateRef m l)
+reapplyBlock cfg b _rr db = do
+  let ks = getBlockKeySets b
+  case currentHandle db of
+    StateRef st tbs -> do
+      newtbs <- duplicate tbs
+      vals <- read newtbs ks
+      let st' = tickThenReapply cfg b (st `withLedgerTables` vals)
+      let (newst, diffs) = (forgetLedgerTables st', ltprj st')
+      write newtbs diffs
+      pure (StateRef newst newtbs)
+
+-- | Prune ledger states from the front until we have at most @k@ in the
+-- LedgerDB, excluding the one stored at the anchor.
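+--
+-- The result is a pair: the second component is the pruned sequence (the one
+-- the examples below refer to), while the first component holds what was split
+-- off, so that callers can close any table handles that are no longer needed.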
+-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> ldb' = LedgerSeq $ AS.fromOldestFirst l1 [l2, l3] +-- >>> snd (prune (SecurityParam 2) ldb) == ldb' +-- True +prune :: GetTip l + => SecurityParam + -> LedgerSeq m l + -> (LedgerSeq m l, LedgerSeq m l) +prune (SecurityParam k) (LedgerSeq ldb) = + if toEnum nvol <= k + then (LedgerSeq $ Empty (AS.anchor ldb), LedgerSeq ldb) + else B.bimap (LedgerSeq . dropNewest 1) LedgerSeq $ AS.splitAt (nvol - fromEnum k) ldb + where + nvol = AS.length ldb + +-- NOTE: we must inline 'prune' otherwise we get unexplained thunks in +-- 'LedgerSeq' and thus a space leak. Alternatively, we could disable the +-- @-fstrictness@ optimisation (enabled by default for -O1). See #2532. +-- NOTE (@js): this INLINE was inherited from before UTxO-HD, so maybe it is not +-- needed anymore. +{-# INLINE prune #-} + +-- | Extending the LedgerDB with a valid ledger state. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> LedgerSeq ldb' = extend l4 ldb +-- >>> AS.toOldestFirst ldb' == [l1, l2, l3, l4] +-- True +extend :: GetTip l + => StateRef m l + -> LedgerSeq m l + -> LedgerSeq m l +extend newState = + LedgerSeq . (:> newState) . getLedgerSeq + +{------------------------------------------------------------------------------- + Reset +-------------------------------------------------------------------------------} + +-- | When creating a new @LedgerDB@, we should load whichever snapshot we find +-- and then replay the chain up to the immutable tip. When we get there, the +-- @LedgerDB@ will have a @k@-long sequence of states, which all come from +-- immutable blocks, so we just prune all of them and only keep the last one as +-- an anchor, as it is the immutable tip. Then we can proceed with opening the +-- VolatileDB. +-- +-- If we didn't do this step, the @LedgerDB@ would accept rollbacks into the +-- immutable part of the chain, which must never be possible. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> LedgerSeq ldb' = snd $ pruneToImmTipOnly ldb +-- >>> AS.anchor ldb' == l3 && AS.toOldestFirst ldb' == [] +-- True +pruneToImmTipOnly :: GetTip l + => LedgerSeq m l + -> (LedgerSeq m l, LedgerSeq m l) +pruneToImmTipOnly = prune (SecurityParam 0) + +{------------------------------------------------------------------------------- + Internal: rolling back +-------------------------------------------------------------------------------} + +-- | Rollback @n@ ledger states. +-- +-- Returns 'Nothing' if maximum rollback (usually @k@, but can be less on +-- startup or under corruption) is exceeded. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> fmap (([l1] ==) . AS.toOldestFirst . getLedgerSeq) (rollbackN 2 ldb) +-- Just True +rollbackN :: + GetTip l + => Word64 + -> LedgerSeq m l + -> Maybe (LedgerSeq m l) +rollbackN n ldb + | n <= maxRollback ldb + = Just $ LedgerSeq (AS.dropNewest (fromIntegral n) $ getLedgerSeq ldb) + | otherwise + = Nothing + +{------------------------------------------------------------------------------- + Queries +-------------------------------------------------------------------------------} + +-- | The ledger state at the tip of the chain +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> l3s == current ldb +-- True +current :: GetTip l => LedgerSeq m l -> l EmptyMK +current = state . currentHandle + +currentHandle :: GetTip l => LedgerSeq m l -> StateRef m l +currentHandle = headAnchor . 
getLedgerSeq + +-- | The ledger state at the anchor of the Volatile chain (i.e. the immutable +-- tip). +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> l0s == anchor ldb +-- True +anchor :: LedgerSeq m l -> l EmptyMK +anchor = state . anchorHandle + +anchorHandle :: LedgerSeq m l -> StateRef m l +anchorHandle = AS.anchor . getLedgerSeq + +-- | All snapshots currently stored by the ledger DB (new to old) +-- +-- This also includes the snapshot at the anchor. For each snapshot we also +-- return the distance from the tip. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> [(0, l3s), (1, l2s), (2, l1s)] == snapshots ldb +-- True +snapshots :: LedgerSeq m l -> [(Word64, l EmptyMK)] +snapshots = + zip [0..] + . map state + . AS.toNewestFirst + . getLedgerSeq + +-- | How many blocks can we currently roll back? +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> maxRollback ldb +-- 3 +maxRollback :: GetTip l => LedgerSeq m l -> Word64 +maxRollback = + fromIntegral + . AS.length + . getLedgerSeq + +-- | Reference to the block at the tip of the chain +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> tip ldb == getTip l3s +-- True +tip :: GetTip l => LedgerSeq m l -> Point l +tip = castPoint . getTip . current + +-- | Have we seen at least @k@ blocks? +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> isSaturated (SecurityParam 3) ldb +-- True +-- >>> isSaturated (SecurityParam 4) ldb +-- False +isSaturated :: GetTip l => SecurityParam -> LedgerSeq m l -> Bool +isSaturated (SecurityParam k) db = + maxRollback db >= k + +-- | Get a past ledger state +-- +-- \( O(\log(\min(i,n-i)) \) +-- +-- When no ledger state (or anchor) has the given 'Point', 'Nothing' is +-- returned. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> getPastLedgerAt (Point (At (Block 4 4)) :: Point B) ldb == Nothing +-- True +-- >>> getPastLedgerAt (Point (At (Block 1 1)) :: Point B) ldb == Just l2s +-- True +getPastLedgerAt :: + ( HasHeader blk, GetTip l, HeaderHash l ~ HeaderHash blk + , StandardHash l + ) + => Point blk + -> LedgerSeq m l + -> Maybe (l EmptyMK) +getPastLedgerAt pt db = current <$> rollback pt db + +-- | Roll back the volatile states up to the specified point. +-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> Just (LedgerSeq ldb') = rollbackToPoint (Point Origin) ldb +-- >>> AS.anchor ldb' == l0 && AS.toOldestFirst ldb' == [] +-- True +-- >>> rollbackToPoint (Point (At (Block 1 2))) ldb == Nothing +-- True +-- >>> Just (LedgerSeq ldb') = rollbackToPoint (Point (At (Block 1 1))) ldb +-- >>> AS.anchor ldb' == l0 && AS.toOldestFirst ldb' == [l1, l2] +-- True +rollbackToPoint :: + ( StandardHash l + , GetTip l + ) + => Point l -> LedgerSeq m l -> Maybe (LedgerSeq m l) +rollbackToPoint pt (LedgerSeq ldb) = do + LedgerSeq <$> + AS.rollback + (pointSlot pt) + ((== pt) . getTip . either state state) + ldb + +-- | Rollback the volatile states up to the volatile anchor. 
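+-- In other words, drop every state after the anchor, keeping only the state at
+-- the immutable tip.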
+-- +-- >>> ldb = LedgerSeq $ AS.fromOldestFirst l0 [l1, l2, l3] +-- >>> LedgerSeq ldb' = rollbackToAnchor ldb +-- >>> AS.anchor ldb' == l0 && AS.toOldestFirst ldb' == [] +-- True +rollbackToAnchor :: + GetTip l + => LedgerSeq m l -> LedgerSeq m l +rollbackToAnchor (LedgerSeq vol) = + LedgerSeq (AS.Empty (AS.anchor vol)) + +-- | Get a prefix of the LedgerDB that ends at the given point +-- +-- \( O(\log(\min(i,n-i)) \) +-- +-- When no ledger state (or anchor) has the given 'Point', 'Nothing' is +-- returned. +rollback :: + ( HasHeader blk, GetTip l, HeaderHash l ~ HeaderHash blk + , StandardHash l + ) + => Point blk + -> LedgerSeq m l + -> Maybe (LedgerSeq m l) +rollback pt db + | pt == castPoint (getTip (anchor db)) + = Just $ rollbackToAnchor db + | otherwise + = rollbackToPoint (castPoint pt) db + +immutableTipSlot :: + GetTip l + => LedgerSeq m l -> WithOrigin SlotNo +immutableTipSlot = + getTipSlot + . state + . AS.anchor + . getLedgerSeq + +-- | Transform the underlying volatile 'AnchoredSeq' using the given functions. +volatileStatesBimap :: + AS.Anchorable (WithOrigin SlotNo) a b + => (l EmptyMK -> a) + -> (l EmptyMK -> b) + -> LedgerSeq m l + -> AS.AnchoredSeq (WithOrigin SlotNo) a b +volatileStatesBimap f g = + AS.bimap (f . state) (g . state) + . getLedgerSeq + +{------------------------------------------------------------------------------- + docspec setup +-------------------------------------------------------------------------------} + +-- $setup +-- >>> :set -XTypeFamilies -XUndecidableInstances +-- >>> import qualified Ouroboros.Network.AnchoredSeq as AS +-- >>> import Ouroboros.Network.Block +-- >>> import Ouroboros.Network.Point +-- >>> import Ouroboros.Consensus.Ledger.Tables +-- >>> import Ouroboros.Consensus.Ledger.Tables.Utils +-- >>> import Ouroboros.Consensus.Ledger.Basics +-- >>> import Ouroboros.Consensus.Config +-- >>> import Ouroboros.Consensus.Storage.LedgerDB.V2.InMemory +-- >>> import Data.Void +-- >>> import Cardano.Slotting.Slot +-- >>> data B +-- >>> data LS (mk :: MapKind) = LS (Point LS) +-- >>> type instance HeaderHash LS = Int +-- >>> type instance HeaderHash B = HeaderHash LS +-- >>> instance StandardHash LS +-- >>> type instance Key LS = Void +-- >>> type instance Value LS = Void +-- >>> instance LedgerTablesAreTrivial LS where convertMapKind (LS p) = LS p +-- >>> instance HasLedgerTables LS +-- >>> s = [LS (Point Origin), LS (Point (At (Block 0 0))), LS (Point (At (Block 1 1))), LS (Point (At (Block 2 2))), LS (Point (At (Block 3 3)))] +-- >>> [l0s, l1s, l2s, l3s, l4s] = s +-- >>> emptyHandle = LedgerTablesHandle undefined undefined undefined undefined undefined undefined undefined undefined +-- >>> [l0, l1, l2, l3, l4] = map (flip StateRef emptyHandle) s +-- >>> instance GetTip LS where getTip (LS p) = p +-- >>> instance Eq (LS EmptyMK) where LS p1 == LS p2 = p1 == p2 +-- >>> instance StandardHash B +-- >>> instance HasHeader B where getHeaderFields = undefined diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/VolatileDB/Impl.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/VolatileDB/Impl.hs index 6d1d09cfcd..71f45407e1 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/VolatileDB/Impl.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/VolatileDB/Impl.hs @@ -155,6 +155,9 @@ data VolatileDbArgs f m blk = VolatileDbArgs { -- 'Ouroboros.Consensus.Storage.Common.GetVerifiedBlock' components when -- extracting them from the 
VolatileDB. volCheckIntegrity :: HKD f (blk -> Bool) + -- ^ Predicate to check for integrity of + -- 'Ouroboros.Consensus.Storage.Common.GetVerifiedBlock' components when + -- extracting them from the VolatileDB. , volCodecConfig :: HKD f (CodecConfig blk) , volHasFS :: HKD f (SomeHasFS m) , volMaxBlocksPerFile :: BlocksPerFile @@ -162,6 +165,8 @@ data VolatileDbArgs f m blk = VolatileDbArgs { -- | Should the parser for the VolatileDB fail when it encounters a -- corrupt/invalid block? , volValidationPolicy :: BlockValidationPolicy + -- ^ Should the parser for the VolatileDB fail when it encounters a + -- corrupt/invalid block? } -- | Default arguments diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ticked.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ticked.hs index 7056e74305..999c1a53a8 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ticked.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ticked.hs @@ -2,11 +2,16 @@ {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} -module Ouroboros.Consensus.Ticked (Ticked (..)) where +module Ouroboros.Consensus.Ticked ( + Ticked (..) + , Ticked1 + ) where import Data.Kind (Type) import Data.SOP.BasicFunctors @@ -40,6 +45,7 @@ import Ouroboros.Consensus.Block.Abstract -- * New leader schedule computed for Shelley -- * Transition from Byron to Shelley activated in the hard fork combinator. -- * Nonces switched out at the start of a new epoch. +type Ticked :: Type -> Type data family Ticked st :: Type -- Standard instance for use with trivial state @@ -55,8 +61,17 @@ type instance HeaderHash (Ticked l) = HeaderHash l deriving newtype instance {-# OVERLAPPING #-} Show (Ticked (f a)) - => Show ((Ticked :.: f) a) + => Show ((Ticked :.: f) (a :: Type)) deriving newtype instance NoThunks (Ticked (f a)) => NoThunks ((Ticked :.: f) a) + +{------------------------------------------------------------------------------- + @'Ticked'@ for state with a poly-kinded type parameter +-------------------------------------------------------------------------------} + +type Ticked1 :: (k -> Type) -> (k -> Type) +data family Ticked1 st + +type instance HeaderHash (Ticked1 (l :: k -> Type)) = HeaderHash l diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/TypeFamilyWrappers.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/TypeFamilyWrappers.hs index 0d599a4a78..6d02b6255b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/TypeFamilyWrappers.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/TypeFamilyWrappers.hs @@ -22,7 +22,9 @@ module Ouroboros.Consensus.TypeFamilyWrappers ( , WrapTentativeHeaderState (..) , WrapTentativeHeaderView (..) , WrapTipInfo (..) + , WrapTxIn (..) , WrapTxMeasure (..) + , WrapTxOut (..) , WrapValidatedGenTx (..) -- * Protocol based , WrapCanBeLeader (..) 
@@ -83,6 +85,9 @@ newtype WrapTipInfo blk = WrapTipInfo { unwrapTipInf newtype WrapValidatedGenTx blk = WrapValidatedGenTx { unwrapValidatedGenTx :: Validated (GenTx blk) } newtype WrapTxMeasure blk = WrapTxMeasure { unwrapTxMeasure :: TxMeasure blk } +newtype WrapTxIn blk = WrapTxIn { unwrapTxIn :: Key (LedgerState blk) } +newtype WrapTxOut blk = WrapTxOut { unwrapTxOut :: Value (LedgerState blk) } + {------------------------------------------------------------------------------- Consensus based -------------------------------------------------------------------------------} @@ -143,6 +148,16 @@ deriving instance NoThunks (TentativeHeaderState blk ) => NoThunks (WrapTent deriving instance NoThunks (TipInfo blk ) => NoThunks (WrapTipInfo blk) deriving instance NoThunks (Validated (GenTx blk)) => NoThunks (WrapValidatedGenTx blk) +deriving instance Show (Key (LedgerState blk)) => Show (WrapTxIn blk) +deriving instance Eq (Key (LedgerState blk)) => Eq (WrapTxIn blk) +deriving instance Ord (Key (LedgerState blk)) => Ord (WrapTxIn blk) +deriving instance NoThunks (Key (LedgerState blk)) => NoThunks (WrapTxIn blk) + +deriving instance Show (Value (LedgerState blk)) => Show (WrapTxOut blk) +deriving instance Eq (Value (LedgerState blk)) => Eq (WrapTxOut blk) +deriving instance Ord (Value (LedgerState blk)) => Ord (WrapTxOut blk) +deriving instance NoThunks (Value (LedgerState blk)) => NoThunks (WrapTxOut blk) + {------------------------------------------------------------------------------- .. consensus based -------------------------------------------------------------------------------} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util.hs index 7e566c2502..cb17c656df 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util.hs @@ -57,6 +57,8 @@ module Ouroboros.Consensus.Util ( , checkThat -- * Sets , allDisjoint + -- * Maps + , dimap -- * Composition , (......:) , (.....:) @@ -77,6 +79,9 @@ module Ouroboros.Consensus.Util ( , electric , newFuse , withFuse + -- * withTMVar + , withTMVar + , withTMVarAnd ) where import Cardano.Crypto.Hash (Hash, HashAlgorithm, hashFromBytes, @@ -94,6 +99,8 @@ import Data.Functor.Product import Data.Kind (Type) import Data.List as List (foldl', maximumBy) import Data.List.NonEmpty (NonEmpty (..), (<|)) +import Data.Map (Map) +import qualified Data.Map.Strict as Map import Data.Maybe (fromMaybe) import Data.Set (Set) import qualified Data.Set as Set @@ -347,6 +354,15 @@ allDisjoint = go Set.empty go _ [] = True go acc (xs:xss) = Set.disjoint acc xs && go (Set.union acc xs) xss +{------------------------------------------------------------------------------- + Maps +-------------------------------------------------------------------------------} + +-- | Map over keys and values +dimap :: Ord k2 => (k1 -> k2) -> (v1 -> v2) -> Map k1 v1 -> Map k2 v2 +dimap keyFn valFn = Map.foldlWithKey update Map.empty + where update m k1 v1 = Map.insert (keyFn k1) (valFn v1) m + {------------------------------------------------------------------------------- Composition -------------------------------------------------------------------------------} @@ -450,3 +466,43 @@ withFuse (Fuse name m) (Electric io) = do newtype FuseBlownException = FuseBlownException Text deriving (Show) deriving anyclass (Exception) + 
+{------------------------------------------------------------------------------- + withTMVar +-------------------------------------------------------------------------------} + +-- | Apply @f@ with the content of @tv@ as state, restoring the original value when an +-- exception occurs +withTMVar :: + IOLike m + => StrictTMVar m a + -> (a -> m (c, a)) + -> m c +withTMVar tv f = withTMVarAnd tv (const $ pure ()) (\a -> const $ f a) + +-- | Apply @f@ with the content of @tv@ as state, restoring the original value +-- when an exception occurs. Additionally run a @STM@ action when acquiring the +-- value. +withTMVarAnd :: + IOLike m + => StrictTMVar m a + -> (a -> STM m b) -- ^ Additional STM action to run in the same atomically + -- block as the TMVar is acquired + -> (a -> b -> m (c, a)) -- ^ Action + -> m c +withTMVarAnd tv guard f = + fst . fst <$> generalBracket + (atomically $ do + istate <- takeTMVar tv + guarded <- guard istate + pure (istate, guarded) + ) + (\(origState, _) -> \case + ExitCaseSuccess (_, newState) + -> atomically $ putTMVar tv newState + ExitCaseException _ + -> atomically $ putTMVar tv origState + ExitCaseAbort + -> atomically $ putTMVar tv origState + ) + (uncurry f) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/Args.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/Args.hs index c0fc821fed..18b26e4102 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/Args.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/Args.hs @@ -41,7 +41,7 @@ module Ouroboros.Consensus.Util.Args ( ) where import Data.Functor.Identity (Identity (..)) -import Data.Kind +import Data.Kind (Type) data Defaults t = NoDefault deriving (Functor) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/DepPair.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/DepPair.hs index 9c07a7d9ba..9ef3b8cb9a 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/DepPair.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/DepPair.hs @@ -1,8 +1,11 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DefaultSignatures #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE RankNTypes #-} +{-# LANGUAGE StandaloneKindSignatures #-} {-# LANGUAGE TypeFamilies #-} {-# LANGUAGE TypeOperators #-} @@ -13,6 +16,7 @@ module Ouroboros.Consensus.Util.DepPair ( , depPairFirst -- * Compare indices , SameDepIndex (..) + , SameDepIndex2 (..) -- * Trivial dependency , TrivialDependency (..) , fromTrivialDependency @@ -22,7 +26,7 @@ module Ouroboros.Consensus.Util.DepPair ( , (:~:) (..) 
) where -import Data.Kind (Type) +import Data.Kind (Constraint, Type) import Data.Proxy import Data.SOP.BasicFunctors (I (..)) import Data.Type.Equality ((:~:) (..)) @@ -54,12 +58,17 @@ depPairFirst f (GenDepPair ix a) = GenDepPair (f ix) a Compare indices -------------------------------------------------------------------------------} +type SameDepIndex :: (Type -> Type) -> Constraint class SameDepIndex f where sameDepIndex :: f a -> f b -> Maybe (a :~: b) default sameDepIndex :: TrivialDependency f => f a -> f b -> Maybe (a :~: b) sameDepIndex ix ix' = Just $ hasSingleIndex ix ix' +type SameDepIndex2 :: (k1 -> k2 -> Type) -> Constraint +class SameDepIndex2 f where + sameDepIndex2 :: f x a -> f y b -> Maybe ('(x, a) :~: '(y, b)) + {------------------------------------------------------------------------------- Trivial dependencies -------------------------------------------------------------------------------} diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/EarlyExit.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/EarlyExit.hs index d132bec59c..0ecc19fe3b 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/EarlyExit.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/EarlyExit.hs @@ -92,7 +92,7 @@ collapse :: Maybe () -> () collapse Nothing = () collapse (Just ()) = () -exitEarly :: Applicative m => WithEarlyExit m a +exitEarly :: Monad m => WithEarlyExit m a exitEarly = earlyExit $ pure Nothing instance (forall a'. NoThunks (m a')) diff --git a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/IOLike.hs b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/IOLike.hs index 88a2b43a44..3dffaa223c 100644 --- a/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/IOLike.hs +++ b/ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Util/IOLike.hs @@ -56,6 +56,7 @@ import qualified Control.Concurrent.Class.MonadSTM.Strict as StrictSTM import Control.Monad.Class.MonadAsync import Control.Monad.Class.MonadEventlog import Control.Monad.Class.MonadFork +import Control.Monad.Class.MonadSay import Control.Monad.Class.MonadST import Control.Monad.Class.MonadThrow import Control.Monad.Class.MonadTime.SI @@ -88,9 +89,12 @@ class ( MonadAsync m , MonadMask m , MonadMonotonicTime m , MonadEvaluate m + , MonadTraceSTM m , Alternative (STM m) , MonadCatch (STM m) , PrimMonad m + , MonadSay m + , MonadLabelledSTM m , forall a. NoThunks (m a) , forall a. NoThunks a => NoThunks (StrictSTM.StrictTVar m a) , forall a. NoThunks a => NoThunks (StrictSVar m a) diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/LedgerTables.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/LedgerTables.hs new file mode 100644 index 0000000000..10fa48e326 --- /dev/null +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/LedgerTables.hs @@ -0,0 +1,57 @@ +{-# LANGUAGE FlexibleContexts #-} + +module Test.LedgerTables ( + prop_hasledgertables_laws + , prop_stowable_laws + ) where + +import Data.Function (on) +import Ouroboros.Consensus.Ledger.Basics +import Test.QuickCheck + +-- | We compare the Ledger Tables of the result because the comparison with the +-- rest of the LedgerState takes considerably more time to run. +(==?) :: + ( CanMapMK mk + , CanMapKeysMK mk + , ZeroableMK mk + , EqMK mk + , ShowMK mk + , HasLedgerTables (LedgerState blk) + ) + => LedgerState blk mk + -> LedgerState blk mk + -> Property +(==?) 
= (===) `on` projectLedgerTables + +infix 4 ==? + +-- | The StowableLedgerTables instances should follow these two laws: +-- +-- > stow . unstow == id +-- +-- > unstow . stow == id +prop_stowable_laws :: + ( HasLedgerTables (LedgerState blk) + , CanStowLedgerTables (LedgerState blk) + ) + => LedgerState blk EmptyMK + -> LedgerState blk ValuesMK + -> Property +prop_stowable_laws = \ls ls' -> + stowLedgerTables (unstowLedgerTables ls) ==? ls .&&. + unstowLedgerTables (stowLedgerTables ls') ==? ls' + +-- | The HasLedgerTables instances should follow these two laws: +-- +-- > with . project == id +-- +-- > project . with == id +prop_hasledgertables_laws :: + HasLedgerTables (LedgerState blk) + => LedgerState blk EmptyMK + -> LedgerTables (LedgerState blk) ValuesMK + -> Property +prop_hasledgertables_laws = \ls tbs -> + (ls `withLedgerTables` (projectLedgerTables ls)) ==? ls .&&. + projectLedgerTables (ls `withLedgerTables` tbs) === tbs diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs index b81819ecd6..1962043c6e 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainDB.hs @@ -19,16 +19,18 @@ import Ouroboros.Consensus.Block.Abstract import Ouroboros.Consensus.Config (TopLevelConfig (topLevelConfigLedger), configCodec) import Ouroboros.Consensus.HardFork.History.EraParams (eraEpochSize) +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState) import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB hiding (TraceFollowerEvent (..)) import Ouroboros.Consensus.Storage.ChainDB.Impl.Args -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB import Ouroboros.Consensus.Storage.ImmutableDB import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB import Ouroboros.Consensus.Storage.LedgerDB (configLedgerDb) -import qualified Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.V2.Args import Ouroboros.Consensus.Storage.VolatileDB import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util.Args @@ -53,18 +55,18 @@ data NodeDBs db = NodeDBs { deriving (Functor, Foldable, Traversable) emptyNodeDBs :: MonadSTM m => m (NodeDBs (StrictTMVar m MockFS)) -emptyNodeDBs = NodeDBs - <$> atomically (newTMVar Mock.empty) - <*> atomically (newTMVar Mock.empty) - <*> atomically (newTMVar Mock.empty) - <*> atomically (newTMVar Mock.empty) +emptyNodeDBs = atomically $ NodeDBs + <$> newTMVar Mock.empty + <*> newTMVar Mock.empty + <*> newTMVar Mock.empty + <*> newTMVar Mock.empty -- | Minimal set of arguments for creating a ChainDB instance for testing purposes. data MinimalChainDbArgs m blk = MinimalChainDbArgs { mcdbTopLevelConfig :: TopLevelConfig blk , mcdbChunkInfo :: ImmutableDB.ChunkInfo -- ^ Specifies the layout of the ImmutableDB on disk. - , mcdbInitLedger :: ExtLedgerState blk + , mcdbInitLedger :: ExtLedgerState blk ValuesMK -- ^ The initial ledger state. , mcdbRegistry :: ResourceRegistry m -- ^ Keeps track of non-lexically scoped resources. 
@@ -110,14 +112,17 @@ fromMinimalChainDbArgs MinimalChainDbArgs {..} = ChainDbArgs { , volTracer = nullTracer , volValidationPolicy = VolatileDB.ValidateAll } - , cdbLgrDbArgs = LgrDbArgs { - lgrDiskPolicyArgs = LedgerDB.DiskPolicyArgs LedgerDB.DefaultSnapshotInterval LedgerDB.DefaultNumOfDiskSnapshots + , cdbLgrDbArgs = LedgerDbArgs { + lgrSnapshotPolicyArgs = LedgerDB.SnapshotPolicyArgs LedgerDB.DefaultSnapshotInterval LedgerDB.DefaultNumOfDiskSnapshots -- Keep 2 ledger snapshots, and take a new snapshot at least every 2 * -- k seconds, where k is the security parameter. - , lgrGenesis = return mcdbInitLedger - , lgrHasFS = SomeHasFS $ simHasFS (nodeDBsLgr mcdbNodeDBs) - , lgrTracer = nullTracer - , lgrConfig = configLedgerDb mcdbTopLevelConfig + , lgrGenesis = return mcdbInitLedger + , lgrHasFS = SomeHasFS $ simHasFS (nodeDBsLgr mcdbNodeDBs) + , lgrTracer = nullTracer + , lgrRegistry = mcdbRegistry + , lgrConfig = configLedgerDb mcdbTopLevelConfig + , lgrFlavorArgs = LedgerDbFlavorArgsV2 (V2Args InMemoryHandleArgs) + , lgrStartSnapshot = Nothing } , cdbsArgs = ChainDbSpecificArgs { cdbsBlocksToAddSize = 1 diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainUpdates.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainUpdates.hs index 43d91a672b..df75f36253 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainUpdates.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/ChainUpdates.hs @@ -13,14 +13,14 @@ module Test.Util.ChainUpdates ( ) where import Control.Monad (replicateM, replicateM_) -import Control.Monad.State.Strict (MonadTrans, execStateT, get, lift, - modify) +import Control.Monad.State.Strict (execStateT, get, lift, modify) import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config import Ouroboros.Consensus.Util.Condense (Condense (..)) import Ouroboros.Network.Mock.Chain (Chain (Genesis)) import qualified Ouroboros.Network.Mock.Chain as Chain import Test.QuickCheck +import Test.Util.QuickCheck (frequency') import Test.Util.TestBlock data ChainUpdate = @@ -161,17 +161,6 @@ genChainUpdateState updateBehavior securityParam n = genAddBlock Invalid genSwitchFork (pure 1) --- | Variant of 'frequency' that allows for transformers of 'Gen' -frequency' :: (MonadTrans t, Monad (t Gen)) => [(Int, t Gen a)] -> t Gen a -frequency' [] = error "frequency' used with empty list" -frequency' xs0 = lift (choose (1, tot)) >>= (`pick` xs0) - where - tot = sum (map fst xs0) - - pick n ((k,x):xs) - | n <= k = x - | otherwise = pick (n-k) xs - pick _ _ = error "pick used with empty list" -- | Test that applying the generated updates gives us the same chain -- as @cusCurrentChain@. diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/LedgerStateOnlyTables.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/LedgerStateOnlyTables.hs new file mode 100644 index 0000000000..d6e64aced8 --- /dev/null +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/LedgerStateOnlyTables.hs @@ -0,0 +1,79 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} + +-- | A simple ledger state that only holds ledger tables (and values). +-- +-- This is useful when we only need a ledger state and ledger tables, but not +-- necessarily blocks with payloads (such as defined in @Test.Util.TestBlock@). 
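+--
+-- The state defined here, 'OTLedgerState', simply keeps a 'ValuesMK' payload
+-- next to its ledger tables, which is just enough structure to define (and
+-- test) the 'HasLedgerTables' and 'CanStowLedgerTables' instances.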
+module Test.Util.LedgerStateOnlyTables ( + OTLedgerState + , OTLedgerTables + , pattern OTLedgerState + ) where + +import Cardano.Binary (FromCBOR (..), ToCBOR (..)) +import NoThunks.Class (NoThunks) +import Ouroboros.Consensus.Ledger.Basics (LedgerState) +import Ouroboros.Consensus.Ledger.Tables (CanSerializeLedgerTables, + CanStowLedgerTables (..), HasLedgerTables (..), Key, + LedgerTables (..), MapKind, Value, ValuesMK, + ZeroableMK (..)) +import Ouroboros.Consensus.Ledger.Tables.Utils (emptyLedgerTables) + +{------------------------------------------------------------------------------- + Simple ledger state +-------------------------------------------------------------------------------} + +type OTLedgerState k v = LedgerState (OTBlock k v) +type OTLedgerTables k v = LedgerTables (OTLedgerState k v) + +-- | An empty type for blocks, which is only used to record the types @k@ and +-- @v@. +data OTBlock k v + +data instance LedgerState (OTBlock k v) (mk :: MapKind) = OTLedgerState { + otlsLedgerState :: ValuesMK k v + , otlsLedgerTables :: OTLedgerTables k v mk + } + +deriving instance (Ord k, Eq v, Eq (mk k v)) + => Eq (OTLedgerState k v mk) +deriving stock instance (Show k, Show v, Show (mk k v)) + => Show (OTLedgerState k v mk) + +instance (ToCBOR k, FromCBOR k, ToCBOR v, FromCBOR v) + => CanSerializeLedgerTables (OTLedgerState k v) where + +{------------------------------------------------------------------------------- + Stowable +-------------------------------------------------------------------------------} + +instance (Ord k, Eq v) + => CanStowLedgerTables (OTLedgerState k v) where + stowLedgerTables OTLedgerState{otlsLedgerTables} = + OTLedgerState (getLedgerTables otlsLedgerTables) emptyLedgerTables + + unstowLedgerTables OTLedgerState{otlsLedgerState} = + OTLedgerState + emptyMK + (LedgerTables otlsLedgerState) + +{------------------------------------------------------------------------------- + Simple ledger tables +-------------------------------------------------------------------------------} + +type instance Key (OTLedgerState k v) = k +type instance Value (OTLedgerState k v) = v + +instance (Ord k, Eq v, Show k, Show v, NoThunks k, NoThunks v) + => HasLedgerTables (OTLedgerState k v) where + projectLedgerTables OTLedgerState{otlsLedgerTables} = + otlsLedgerTables + + withLedgerTables st lt = + st { otlsLedgerTables = lt } diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/Arbitrary.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/Arbitrary.hs index 52e2cfed17..27f73d4a0c 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/Arbitrary.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/Arbitrary.hs @@ -6,6 +6,7 @@ {-# LANGUAGE GeneralisedNewtypeDeriving #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE NumericUnderscores #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} {-# LANGUAGE TypeApplications #-} @@ -29,6 +30,7 @@ import Data.Coerce (coerce) import Data.SOP.BasicFunctors import Data.SOP.Constraint import Data.SOP.Dict (Dict (..), all_NP, mapAll) +import Data.SOP.Functors (Flip (..)) import Data.SOP.NonEmpty (IsNonEmpty, ProofNonEmpty (..), checkIsNonEmpty, isNonEmpty) import Data.SOP.Sing @@ -53,7 +55,8 @@ import Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck (ClockSkew) import qualified Ouroboros.Consensus.MiniProtocol.ChainSync.Client.InFutureCheck as InFutureCheck import 
Ouroboros.Consensus.Node.ProtocolInfo -import Ouroboros.Consensus.Protocol.Abstract (ChainDepState) +import Ouroboros.Consensus.Protocol.Abstract (ChainDepState, + SecurityParam (..)) import Ouroboros.Consensus.Storage.ChainDB.API (LoE (..)) import Ouroboros.Consensus.Storage.ImmutableDB.Chunks.Internal (ChunkNo (..), ChunkSize (..), RelativeSlot (..)) @@ -269,6 +272,9 @@ instance (All (Arbitrary `Compose` f) xs, IsNonEmpty xs) Telescope & HardForkState -------------------------------------------------------------------------------} +instance Arbitrary (f y x) => Arbitrary (Flip f (x :: kx) (y :: ky)) where + arbitrary = Flip <$> arbitrary + instance Arbitrary Bound where arbitrary = Bound @@ -295,25 +301,25 @@ instance ( IsNonEmpty xs ] shrink = hctraverse' (Proxy @(Arbitrary `Compose` f)) shrink -instance (IsNonEmpty xs, SListI xs, All (Arbitrary `Compose` LedgerState) xs) - => Arbitrary (LedgerState (HardForkBlock xs)) where +instance (IsNonEmpty xs, SListI xs, All (Arbitrary `Compose` Flip LedgerState mk) xs) + => Arbitrary (LedgerState (HardForkBlock xs) mk) where arbitrary = case (dictKPast, dictCurrentLedgerState) of (Dict, Dict) -> inj <$> arbitrary where inj :: - Telescope (K Past) (Current LedgerState) xs - -> LedgerState (HardForkBlock xs) + Telescope (K Past) (Current (Flip LedgerState mk)) xs + -> LedgerState (HardForkBlock xs) mk inj = coerce dictKPast :: Dict (All (Arbitrary `Compose` (K Past))) xs dictKPast = all_NP $ hpure Dict dictCurrentLedgerState :: - Dict (All (Arbitrary `Compose` (Current LedgerState))) xs + Dict (All (Arbitrary `Compose` (Current (Flip LedgerState mk)))) xs dictCurrentLedgerState = mapAll - @(Arbitrary `Compose` LedgerState) - @(Arbitrary `Compose` Current LedgerState) + @(Arbitrary `Compose` Flip LedgerState mk) + @(Arbitrary `Compose` Current (Flip LedgerState mk)) (\Dict -> Dict) Dict @@ -391,13 +397,12 @@ instance Arbitrary QueryVersion where arbitrary = arbitraryBoundedEnum shrink v = if v == minBound then [] else [pred v] -instance Arbitrary (SomeSecond BlockQuery blk) +instance Arbitrary (SomeBlockQuery (BlockQuery blk)) => Arbitrary (SomeSecond Query blk) where arbitrary = do - SomeSecond someBlockQuery <- arbitrary + SomeBlockQuery someBlockQuery <- arbitrary return (SomeSecond (BlockQuery someBlockQuery)) - instance Arbitrary Index.CacheConfig where arbitrary = do pastChunksToCache <- frequency @@ -418,3 +423,11 @@ instance Arbitrary a => Arbitrary (LoE a) where arbitrary = oneof [pure LoEDisabled, LoEEnabled <$> arbitrary] shrink LoEDisabled = [] shrink (LoEEnabled x) = LoEDisabled : map LoEEnabled (shrink x) + +{------------------------------------------------------------------------------- + SecurityParam +-------------------------------------------------------------------------------} + +instance Arbitrary SecurityParam where + arbitrary = SecurityParam <$> choose (0, 6) + shrink (SecurityParam k) = SecurityParam <$> shrink k diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/IOLike.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/IOLike.hs index 9748bb498e..3cadd35a04 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/IOLike.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/IOLike.hs @@ -1,12 +1,16 @@ {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE MultiParamTypeClasses #-} {-# OPTIONS_GHC -Wno-orphans #-} module Test.Util.Orphans.IOLike () where +import Control.Monad.Base import Control.Monad.IOSim import 
Ouroboros.Consensus.Util.IOLike import Test.Util.Orphans.NoThunks () instance IOLike (IOSim s) where forgetSignKeyKES = const $ return () + +instance MonadBase (IOSim s) (IOSim s) where liftBase = id diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/ToExpr.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/ToExpr.hs index fc76d4036c..3215a9a57d 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/ToExpr.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Orphans/ToExpr.hs @@ -1,9 +1,15 @@ +{-# LANGUAGE CPP #-} {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE StandaloneDeriving #-} +#if __GLASGOW_HASKELL__ <= 906 +{-# LANGUAGE TypeFamilies #-} +#endif +{-# LANGUAGE TypeOperators #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -17,9 +23,11 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Mempool.API +import Ouroboros.Consensus.Mempool.TxSeq import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB.API (LoE (..)) -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB import Ouroboros.Consensus.Storage.ImmutableDB import Ouroboros.Consensus.Util.STM (Fingerprint, WithFingerprint) import Ouroboros.Network.AnchoredFragment (AnchoredFragment) @@ -52,20 +60,16 @@ instance (ToExpr blk, ToExpr (HeaderHash blk)) => ToExpr (AnchoredFragment blk) ouroboros-consensus -------------------------------------------------------------------------------} -instance ( ToExpr (LedgerState blk) +instance ( ToExpr (LedgerState blk EmptyMK) , ToExpr (ChainDepState (BlockProtocol blk)) , ToExpr (TipInfo blk) - ) => ToExpr (ExtLedgerState blk) + ) => ToExpr (ExtLedgerState blk EmptyMK) instance ( ToExpr (ChainDepState (BlockProtocol blk)) , ToExpr (TipInfo blk) ) => ToExpr (HeaderState blk) -instance ( ToExpr (TipInfo blk) - ) => ToExpr (AnnTip blk) - instance ToExpr SecurityParam -instance ToExpr DiskSnapshot instance ToExpr ChunkSize instance ToExpr ChunkNo @@ -112,3 +116,28 @@ deriving instance ( ToExpr blk ) => ToExpr (ChainProducerState blk) deriving instance ToExpr a => ToExpr (WithFingerprint a) + +instance ToExpr (TipInfo blk) => ToExpr (AnnTip blk) + +{------------------------------------------------------------------------------- + Mempool and transactions +-------------------------------------------------------------------------------} + +deriving newtype instance ToExpr TicketNo + +instance Show (TxId (GenTx blk)) => ToExpr (TxId (GenTx blk)) where + toExpr x = App (show x) [] + +deriving instance ( ToExpr (GenTx blk) + , LedgerSupportsMempool blk + , measure ~ TxMeasure blk + , ToExpr measure + , ToExpr (Validated (GenTx blk)) + ) => ToExpr (TxTicket measure (Validated (GenTx blk))) + +instance ( ToExpr (GenTx blk) + , LedgerSupportsMempool blk + , ToExpr (Validated (GenTx blk)) + ) => ToExpr (MempoolAddTxResult blk) where + toExpr (MempoolTxAdded vtx) = App "Added" [toExpr vtx] + toExpr (MempoolTxRejected tx e) = App "Rejected" [toExpr tx, App (show e) [] ] diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/QuickCheck.hs 
b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/QuickCheck.hs index 5340949446..3a2880f6d1 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/QuickCheck.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/QuickCheck.hs @@ -17,6 +17,9 @@ module Test.Util.QuickCheck ( , le , lt , strictlyIncreasing + -- * Gen variants that allow transformers + , frequency' + , oneof' -- * Comparing maps , isSubmapOfBy -- * Improved variants @@ -33,7 +36,8 @@ module Test.Util.QuickCheck ( , prop_lawfulEqAndTotalOrd ) where -import Control.Monad.Except +import Control.Monad.Except (Except, runExcept) +import Control.Monad.Trans (MonadTrans (..)) import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Proxy @@ -267,3 +271,23 @@ prop_lawfulEqAndTotalOrd a b c = conjoin , counterexample "max a b == if a >= b then a else b VIOLATED" $ max a b === if a >= b then a else b ] + +{------------------------------------------------------------------------------- + Generator variants that allow for transformers +-------------------------------------------------------------------------------} + +-- | Variant of 'frequency' that allows for transformers of 'Gen' +frequency' :: (MonadTrans t, Monad (t Gen)) => [(Int, t Gen a)] -> t Gen a +frequency' [] = error "frequency' used with empty list" +frequency' xs0 = lift (choose (1, tot)) >>= (`pick` xs0) + where + tot = sum (map fst xs0) + + pick n ((k,x):xs) + | n <= k = x + | otherwise = pick (n-k) xs + pick _ _ = error "pick used with empty list" + +oneof' :: (MonadTrans t, Monad (t Gen)) => [t Gen a] -> t Gen a +oneof' [] = error "QuickCheck.oneof used with empty list" +oneof' gs = lift (chooseInt (0,length gs - 1)) >>= (gs !!) diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Examples.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Examples.hs index 39a2b91ed3..207662ec61 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Examples.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Examples.hs @@ -16,11 +16,12 @@ module Test.Util.Serialisation.Examples ( import Data.Bifunctor (first) import Ouroboros.Consensus.Block (BlockProtocol, Header, HeaderHash, - SlotNo, SomeSecond) + SlotNo) import Ouroboros.Consensus.HeaderValidation (AnnTip) -import Ouroboros.Consensus.Ledger.Abstract (LedgerState) +import Ouroboros.Consensus.Ledger.Abstract (EmptyMK, LedgerState, + LedgerTables, ValuesMK) import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState) -import Ouroboros.Consensus.Ledger.Query (BlockQuery) +import Ouroboros.Consensus.Ledger.Query (BlockQuery, SomeBlockQuery) import Ouroboros.Consensus.Ledger.SupportsMempool (ApplyTxErr, GenTx, GenTxId) import Ouroboros.Consensus.Protocol.Abstract (ChainDepState) @@ -41,13 +42,14 @@ data Examples blk = Examples { , exampleGenTx :: Labelled (GenTx blk) , exampleGenTxId :: Labelled (GenTxId blk) , exampleApplyTxErr :: Labelled (ApplyTxErr blk) - , exampleQuery :: Labelled (SomeSecond BlockQuery blk) + , exampleQuery :: Labelled (SomeBlockQuery (BlockQuery blk)) , exampleResult :: Labelled (SomeResult blk) , exampleAnnTip :: Labelled (AnnTip blk) - , exampleLedgerState :: Labelled (LedgerState blk) + , exampleLedgerState :: Labelled (LedgerState blk EmptyMK) , exampleChainDepState :: Labelled (ChainDepState (BlockProtocol blk)) - , exampleExtLedgerState :: Labelled (ExtLedgerState blk) + , exampleExtLedgerState :: Labelled 
(ExtLedgerState blk EmptyMK) , exampleSlotNo :: Labelled SlotNo + , exampleLedgerTables :: Labelled (LedgerTables (LedgerState blk) ValuesMK) } emptyExamples :: Examples blk @@ -67,6 +69,7 @@ emptyExamples = Examples { , exampleChainDepState = mempty , exampleExtLedgerState = mempty , exampleSlotNo = mempty + , exampleLedgerTables = mempty } combineExamples :: @@ -91,6 +94,7 @@ combineExamples f e1 e2 = Examples { , exampleChainDepState = combine exampleChainDepState , exampleExtLedgerState = combine exampleExtLedgerState , exampleSlotNo = combine exampleSlotNo + , exampleLedgerTables = combine exampleLedgerTables } where combine :: (Examples blk -> Labelled a) -> Labelled a diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Golden.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Golden.hs index 1504ed06b8..1dce0500f4 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Golden.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Golden.hs @@ -1,3 +1,4 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DefaultSignatures #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} @@ -50,9 +51,12 @@ import Data.Proxy (Proxy (..)) import Data.TreeDiff import GHC.Stack (HasCallStack) import Ouroboros.Consensus.Block (CodecConfig) +import Ouroboros.Consensus.Ledger.Abstract (LedgerState) import Ouroboros.Consensus.Ledger.Extended (encodeExtLedgerState) import Ouroboros.Consensus.Ledger.Query (QueryVersion, nodeToClientVersionToQueryVersion) +import Ouroboros.Consensus.Ledger.Tables (HasLedgerTables, + valuesMKEncoder) import Ouroboros.Consensus.Node.NetworkProtocolVersion (HasNetworkProtocolVersion (..), SupportedNetworkProtocolVersion (..)) @@ -61,7 +65,7 @@ import Ouroboros.Consensus.Node.Run (SerialiseDiskConstraints, SerialiseNodeToNodeConstraints) import Ouroboros.Consensus.Node.Serialisation (SerialiseNodeToClient (..), SerialiseNodeToNode (..), - SerialiseResult (..)) + SerialiseResult' (..)) import Ouroboros.Consensus.Storage.Serialisation (EncodeDisk (..)) import Ouroboros.Consensus.Util.CBOR (decodeAsFlatTerm) import Ouroboros.Consensus.Util.Condense (Condense (..)) @@ -153,10 +157,12 @@ goldenTestCBOR testName example enc goldenFile = , show (ansiWlEditExpr (ediff (CBORBytes golden) (CBORBytes actual))) ] - (Right actualFlatTerm, Left _) -> Just $ unlines [ + (Right actualFlatTerm, Left e) -> Just $ unlines [ "Golden output /= actual term:" , "Golden output is not valid CBOR:" , BS.UTF8.toString golden + , "Exception: " + , show e , "Actual term:" , condense actualFlatTerm ] @@ -218,6 +224,7 @@ goldenTest_all :: ( SerialiseDiskConstraints blk , SerialiseNodeToNodeConstraints blk , SerialiseNodeToClientConstraints blk + , HasLedgerTables (LedgerState blk) , SupportedNetworkProtocolVersion blk , ToGoldenDirectory (BlockNodeToNodeVersion blk) @@ -241,7 +248,11 @@ goldenTest_all codecConfig goldenDir examples = -- TODO how can we ensure that we have a test for each constraint listed in -- 'SerialiseDiskConstraints'? goldenTest_SerialiseDisk :: - forall blk. (SerialiseDiskConstraints blk, HasCallStack) + forall blk. 
+ ( HasLedgerTables (LedgerState blk) + , SerialiseDiskConstraints blk + , HasCallStack + ) => CodecConfig blk -> FilePath -> Examples blk @@ -254,6 +265,7 @@ goldenTest_SerialiseDisk codecConfig goldenDir Examples {..} = , test "AnnTip" exampleAnnTip (encodeDisk codecConfig) , test "ChainDepState" exampleChainDepState (encodeDisk codecConfig) , test "ExtLedgerState" exampleExtLedgerState encodeExt + , test "LedgerTables" exampleLedgerTables valuesMKEncoder ] where test :: TestName -> Labelled a -> (a -> Encoding) -> TestTree @@ -345,7 +357,7 @@ goldenTest_SerialiseNodeToClient codecConfig goldenDir Examples {..} = enc' = encodeNodeToClient codecConfig blockVersion encRes :: SomeResult blk -> Encoding - encRes (SomeResult q r) = encodeResult codecConfig blockVersion q r + encRes (SomeResult q r) = encodeResult' codecConfig blockVersion q r test :: TestName -> Labelled a -> (a -> Encoding) -> TestTree test testName exampleValues enc = diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Roundtrip.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Roundtrip.hs index 7425098bc0..437d8574a5 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Roundtrip.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/Roundtrip.hs @@ -1,4 +1,5 @@ {-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DeriveTraversable #-} {-# LANGUAGE DerivingVia #-} @@ -56,15 +57,15 @@ import GHC.Generics (Generic) import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation (AnnTip) import Ouroboros.Consensus.Ledger.Abstract (LedgerState) -import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState, - decodeExtLedgerState, encodeExtLedgerState) -import Ouroboros.Consensus.Ledger.Query (BlockQuery, Query (..), - QueryVersion) +import Ouroboros.Consensus.Ledger.Extended (decodeDiskExtLedgerState, + encodeDiskExtLedgerState) +import Ouroboros.Consensus.Ledger.Query import qualified Ouroboros.Consensus.Ledger.Query as Query import Ouroboros.Consensus.Ledger.SupportsMempool (ApplyTxErr, GenTx, GenTxId) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) +import Ouroboros.Consensus.Ledger.Tables (EmptyMK) import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.Run (SerialiseNodeToClientConstraints, SerialiseNodeToNodeConstraints (..)) @@ -191,7 +192,7 @@ roundtrip_all , Arbitrary' blk , Arbitrary' (Header blk) , Arbitrary' (HeaderHash blk) - , Arbitrary' (LedgerState blk) + , Arbitrary' (LedgerState blk EmptyMK) , Arbitrary' (AnnTip blk) , Arbitrary' (ChainDepState (BlockProtocol blk)) @@ -205,7 +206,7 @@ roundtrip_all , ArbitraryWithVersion (BlockNodeToClientVersion blk) blk , ArbitraryWithVersion (BlockNodeToClientVersion blk) (GenTx blk) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (ApplyTxErr blk) - , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeSecond BlockQuery blk) + , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeBlockQuery (BlockQuery blk)) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeResult blk) , ArbitraryWithVersion (QueryVersion, BlockNodeToClientVersion blk) (SomeSecond Query blk) ) @@ -239,7 +240,7 @@ roundtrip_all_skipping , Arbitrary' blk , Arbitrary' (Header blk) , Arbitrary' (HeaderHash blk) - , Arbitrary' (LedgerState blk) + , Arbitrary' (LedgerState blk EmptyMK) , Arbitrary' (AnnTip blk) , Arbitrary' (ChainDepState 
(BlockProtocol blk)) @@ -253,7 +254,7 @@ roundtrip_all_skipping , ArbitraryWithVersion (BlockNodeToClientVersion blk) blk , ArbitraryWithVersion (BlockNodeToClientVersion blk) (GenTx blk) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (ApplyTxErr blk) - , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeSecond BlockQuery blk) + , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeBlockQuery (BlockQuery blk)) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeResult blk) , ArbitraryWithVersion (QueryVersion, BlockNodeToClientVersion blk) (SomeSecond Query blk) ) @@ -280,7 +281,7 @@ roundtrip_SerialiseDisk ( SerialiseDiskConstraints blk , Arbitrary' blk , Arbitrary' (Header blk) - , Arbitrary' (LedgerState blk) + , Arbitrary' (LedgerState blk EmptyMK) , Arbitrary' (AnnTip blk) , Arbitrary' (ChainDepState (BlockProtocol blk)) ) @@ -301,7 +302,7 @@ roundtrip_SerialiseDisk ccfg dictNestedHdr = -- Since the 'LedgerState' is a large data structure, we lower the -- number of tests to avoid slowing down the testsuite too much , adjustQuickCheckTests (`div` 10) $ - rt (Proxy @(LedgerState blk)) "LedgerState" + rt (Proxy @(LedgerState blk EmptyMK)) "LedgerState" , rt (Proxy @(AnnTip blk)) "AnnTip" , rt (Proxy @(ChainDepState (BlockProtocol blk))) "ChainDepState" ] @@ -329,7 +330,7 @@ type ArbitraryWithVersion v a = (Arbitrary (WithVersion v a), Eq a, Show a) instance ( blockVersion ~ BlockNodeToClientVersion blk , Arbitrary blockVersion - , Arbitrary (WithVersion (BlockNodeToClientVersion blk) (SomeSecond BlockQuery blk)) + , Arbitrary (WithVersion (BlockNodeToClientVersion blk) (SomeBlockQuery (BlockQuery blk))) ) => Arbitrary (WithVersion (QueryVersion, blockVersion) (SomeSecond Query blk)) where arbitrary = do @@ -341,7 +342,8 @@ instance ( blockVersion ~ BlockNodeToClientVersion blk Query.QueryVersion1 -> genTopLevelQuery1 Query.QueryVersion2 -> genTopLevelQuery2 where - mkEntry :: QueryVersion + mkEntry :: + QueryVersion -> Query blk query -> Gen (WithVersion (QueryVersion, blockVersion) (SomeSecond Query blk)) @@ -369,7 +371,7 @@ instance ( blockVersion ~ BlockNodeToClientVersion blk -> Gen (WithVersion (QueryVersion, blockVersion) (SomeSecond Query blk)) arbitraryBlockQuery queryVersion = do - WithVersion blockV (SomeSecond someBlockQuery) <- arbitrary + WithVersion blockV (SomeBlockQuery someBlockQuery) <- arbitrary return (WithVersion (queryVersion, blockV) (SomeSecond (BlockQuery someBlockQuery))) @@ -494,7 +496,7 @@ roundtrip_SerialiseNodeToClient , ArbitraryWithVersion (BlockNodeToClientVersion blk) blk , ArbitraryWithVersion (BlockNodeToClientVersion blk) (GenTx blk) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (ApplyTxErr blk) - , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeSecond BlockQuery blk) + , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeBlockQuery (BlockQuery blk)) , ArbitraryWithVersion (BlockNodeToClientVersion blk) (SomeResult blk) , ArbitraryWithVersion (QueryVersion, BlockNodeToClientVersion blk) (SomeSecond Query blk) @@ -509,7 +511,7 @@ roundtrip_SerialiseNodeToClient shouldCheckCBORvalidity ccfg = [ rt (Proxy @blk) "blk" , rt (Proxy @(GenTx blk)) "GenTx" , rt (Proxy @(ApplyTxErr blk)) "ApplyTxErr" - , rt (Proxy @(SomeSecond BlockQuery blk)) "BlockQuery" + , rt (Proxy @(SomeBlockQuery (BlockQuery blk))) "BlockQuery" , rtWith @(SomeSecond Query blk) @(QueryVersion, BlockNodeToClientVersion blk) @@ -548,8 +550,8 @@ roundtrip_SerialiseNodeToClient shouldCheckCBORvalidity ccfg = \(WithVersion version (SomeResult 
query result :: SomeResult blk)) -> roundtripAnd (shouldCheckCBORvalidity testLabel) - (encodeResult ccfg version query) - (const <$> decodeResult ccfg version query) + (encodeResult' ccfg version query) + (const <$> decodeResult' ccfg version query) result ] where @@ -734,7 +736,7 @@ examplesRoundtrip codecConfig examples = , testRoundtripFor "Ledger state" (encodeDisk codecConfig) (const <$> decodeDisk codecConfig) exampleLedgerState , testRoundtripFor "Annotated tip" (encodeDisk codecConfig) (const <$> decodeDisk codecConfig) exampleAnnTip , testRoundtripFor "Chain dependent state" (encodeDisk codecConfig) (const <$> decodeDisk codecConfig) exampleChainDepState - , testRoundtripFor "Extended ledger state" encodeExt (const <$> decodeExt) exampleExtLedgerState + , testRoundtripFor "Extended ledger state" (encodeDiskExtLedgerState codecConfig) (const <$> decodeDiskExtLedgerState codecConfig) exampleExtLedgerState ] where testRoundtripFor :: @@ -754,16 +756,3 @@ examplesRoundtrip codecConfig examples = testProperty (fromMaybe "" exampleName) $ once $ roundtrip' enc dec example - - encodeExt = - encodeExtLedgerState - (encodeDisk codecConfig) - (encodeDisk codecConfig) - (encodeDisk codecConfig) - - decodeExt :: forall s. Decoder s (ExtLedgerState blk) - decodeExt = - decodeExtLedgerState - (decodeDisk codecConfig) - (decodeDisk codecConfig) - (decodeDisk codecConfig) diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/SomeResult.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/SomeResult.hs index 6ee2916889..dc096ba4ee 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/SomeResult.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/Serialisation/SomeResult.hs @@ -13,7 +13,7 @@ import Ouroboros.Consensus.Ledger.Query (BlockQuery) -- need them in the tests. data SomeResult blk where SomeResult :: (Eq result, Show result, Typeable result) - => BlockQuery blk result -> result -> SomeResult blk + => BlockQuery blk fp result -> result -> SomeResult blk instance Show (SomeResult blk) where show (SomeResult _ result) = show result diff --git a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/TestBlock.hs b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/TestBlock.hs index 5ed7674f03..a973f4788b 100644 --- a/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/TestBlock.hs +++ b/ouroboros-consensus/src/unstable-consensus-testlib/Test/Util/TestBlock.hs @@ -3,6 +3,7 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingVia #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} @@ -11,6 +12,7 @@ {-# LANGUAGE MultiParamTypeClasses #-} {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE QuantifiedConstraints #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} @@ -28,6 +30,7 @@ module Test.Util.TestBlock ( , BlockQuery (..) , CodecConfig (..) , Header (..) + , LedgerTables (..) , StorageConfig (..) , TestBlockError (..) , TestBlockWith (tbPayload, tbSlot, tbValid) @@ -44,13 +47,13 @@ module Test.Util.TestBlock ( , firstBlock , successorBlock -- ** Payload semantics + , PayloadDependentState (..) , PayloadSemantics (..) 
, applyDirectlyToPayloadDependentState -- * LedgerState - , LedgerState (TestLedger) - , Ticked (TickedTestLedger) - , lastAppliedPoint - , payloadDependentState + , LedgerState (TestLedger, payloadDependentState, lastAppliedPoint) + , Ticked1 (TickedTestLedger) + , getTickedTestLedger -- * Chain , BlockChain (..) , blockChain @@ -106,6 +109,7 @@ import Data.Tree (Tree (..)) import qualified Data.Tree as Tree import Data.TreeDiff (ToExpr) import Data.Typeable (Typeable) +import Data.Void (Void) import Data.Word import GHC.Generics (Generic) import NoThunks.Class (NoThunks) @@ -121,6 +125,7 @@ import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node.NetworkProtocolVersion import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.NodeId @@ -375,12 +380,18 @@ class ( Typeable ptype , Eq ptype , NoThunks ptype - , Eq (PayloadDependentState ptype) - , Show (PayloadDependentState ptype) - , Generic (PayloadDependentState ptype) - , ToExpr (PayloadDependentState ptype) - , Serialise (PayloadDependentState ptype) - , NoThunks (PayloadDependentState ptype) + , forall mk. EqMK mk => Eq (PayloadDependentState ptype mk) + , forall mk. NoThunksMK mk => NoThunks (PayloadDependentState ptype mk) + , forall mk. ShowMK mk => Show (PayloadDependentState ptype mk) + + , forall mk. Generic (PayloadDependentState ptype mk) + , Serialise (PayloadDependentState ptype EmptyMK) + + + , HasLedgerTables (LedgerState (TestBlockWith ptype)) + , HasLedgerTables (Ticked1 (LedgerState (TestBlockWith ptype))) + , CanStowLedgerTables (LedgerState (TestBlockWith ptype)) + , CanSerializeLedgerTables (LedgerState (TestBlockWith ptype)) , Eq (PayloadDependentError ptype) , Show (PayloadDependentError ptype) @@ -390,33 +401,44 @@ class ( Typeable ptype , NoThunks (PayloadDependentError ptype) , NoThunks (CodecConfig (TestBlockWith ptype)) + , NoThunks (StorageConfig (TestBlockWith ptype)) ) => PayloadSemantics ptype where - type PayloadDependentState ptype :: Type + data PayloadDependentState ptype (mk :: MapKind) :: Type type PayloadDependentError ptype :: Type applyPayload :: - PayloadDependentState ptype + PayloadDependentState ptype ValuesMK -> ptype - -> Either (PayloadDependentError ptype) (PayloadDependentState ptype) + -> Either (PayloadDependentError ptype) (PayloadDependentState ptype TrackingMK) + + -- | This function is used to implement the 'getBlockKeySets' function of the + -- 'ApplyBlock' class. Thus we assume that the payload contains all the + -- information needed to determine which keys should be retrieved from the + -- backing store to apply a 'TestBlockWith'. + getPayloadKeySets :: ptype -> LedgerTables (LedgerState (TestBlockWith ptype)) KeysMK instance PayloadSemantics () where - type PayloadDependentState () = () + data PayloadDependentState () mk = EmptyPLDS + deriving stock (Eq, Show, Generic) + deriving anyclass (Serialise, NoThunks) type PayloadDependentError () = () - applyPayload _ _ = Right () + applyPayload _ _ = Right EmptyPLDS + + getPayloadKeySets = const trivialLedgerTables -- | Apply the payload directly to the payload dependent state portion of a -- ticked state, leaving the rest of the input ticked state unaltered. 
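+--
+-- The 'ValuesMK' argument carries the key-value pairs that the payload needs,
+-- while the 'TrackingMK' result carries the resulting values together with the
+-- differences produced by applying the payload.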
applyDirectlyToPayloadDependentState :: PayloadSemantics ptype - => Ticked (LedgerState (TestBlockWith ptype)) + => Ticked1 (LedgerState (TestBlockWith ptype)) ValuesMK -> ptype -> Either (PayloadDependentError ptype) - (Ticked (LedgerState (TestBlockWith ptype))) + (Ticked1 (LedgerState (TestBlockWith ptype)) TrackingMK) applyDirectlyToPayloadDependentState (TickedTestLedger st) tx = do payloadDepSt' <- applyPayload (payloadDependentState st) tx pure $ TickedTestLedger $ st { payloadDependentState = payloadDepSt' } @@ -488,6 +510,21 @@ instance ( Typeable ptype signKey :: SlotNo -> SignKeyDSIGN MockDSIGN signKey (SlotNo n) = SignKeyMockDSIGN $ n `mod` numCore +type instance Key (LedgerState TestBlock) = Void +type instance Value (LedgerState TestBlock) = Void + +instance HasLedgerTables (LedgerState TestBlock) where +instance HasLedgerTables (Ticked1 (LedgerState TestBlock)) where + +instance LedgerTablesAreTrivial (LedgerState TestBlock) where + convertMapKind (TestLedger x EmptyPLDS) = TestLedger x EmptyPLDS +instance LedgerTablesAreTrivial (Ticked1 (LedgerState TestBlock)) where + convertMapKind (TickedTestLedger x) = TickedTestLedger $ convertMapKind x + +instance CanSerializeLedgerTables (LedgerState TestBlock) + +instance CanStowLedgerTables (LedgerState TestBlock) + instance PayloadSemantics ptype => ApplyBlock (LedgerState (TestBlockWith ptype)) (TestBlockWith ptype) where applyBlockLedgerResult _ tb@TestBlockWith{..} (TickedTestLedger TestLedger{..}) @@ -499,6 +536,7 @@ instance PayloadSemantics ptype = case applyPayload payloadDependentState tbPayload of Left err -> throwError $ InvalidPayload err Right st' -> return $ pureLedgerResult + $ forgetTrackingValues $ TestLedger { lastAppliedPoint = Chain.blockPoint tb , payloadDependentState = st' @@ -508,40 +546,52 @@ instance PayloadSemantics ptype case applyPayload payloadDependentState tbPayload of Left err -> error $ "Found an error when reapplying a block: " ++ show err Right st' -> pureLedgerResult + $ forgetTrackingValues $ TestLedger { lastAppliedPoint = Chain.blockPoint tb , payloadDependentState = st' } -data instance LedgerState (TestBlockWith ptype) = + getBlockKeySets = getPayloadKeySets . tbPayload + +data instance LedgerState (TestBlockWith ptype) mk = TestLedger { -- | The ledger state simply consists of the last applied block lastAppliedPoint :: Point (TestBlockWith ptype) -- | State that depends on the application of the block payload to the -- state. 
- , payloadDependentState :: PayloadDependentState ptype + , payloadDependentState :: PayloadDependentState ptype mk } -deriving stock instance PayloadSemantics ptype => Show (LedgerState (TestBlockWith ptype)) -deriving stock instance PayloadSemantics ptype => Eq (LedgerState (TestBlockWith ptype)) -deriving stock instance Generic (LedgerState (TestBlockWith ptype)) +deriving stock instance (ShowMK mk, PayloadSemantics ptype) + => Show (LedgerState (TestBlockWith ptype) mk) -deriving anyclass instance PayloadSemantics ptype => Serialise (LedgerState (TestBlockWith ptype)) -deriving anyclass instance PayloadSemantics ptype => NoThunks (LedgerState (TestBlockWith ptype)) -deriving anyclass instance PayloadSemantics ptype => ToExpr (LedgerState (TestBlockWith ptype)) +deriving stock instance Eq (PayloadDependentState ptype mk) + => Eq (LedgerState (TestBlockWith ptype) mk) -testInitLedgerWithState :: PayloadDependentState ptype -> LedgerState (TestBlockWith ptype) +deriving stock instance Generic (LedgerState (TestBlockWith ptype) mk) + +deriving anyclass instance PayloadSemantics ptype => + Serialise (LedgerState (TestBlockWith ptype) EmptyMK) +deriving anyclass instance NoThunks (PayloadDependentState ptype mk) => + NoThunks (LedgerState (TestBlockWith ptype) mk) + +testInitLedgerWithState :: + PayloadDependentState ptype mk -> LedgerState (TestBlockWith ptype) mk testInitLedgerWithState = TestLedger GenesisPoint -- Ticking has no effect -newtype instance Ticked (LedgerState (TestBlockWith ptype)) = TickedTestLedger { - getTickedTestLedger :: LedgerState (TestBlockWith ptype) +newtype instance Ticked1 (LedgerState (TestBlockWith ptype)) mk = TickedTestLedger { + getTickedTestLedger :: LedgerState (TestBlockWith ptype) mk } - deriving stock (Generic, Show) - deriving newtype (NoThunks, ToExpr, Eq) -testInitExtLedgerWithState :: PayloadDependentState ptype -> ExtLedgerState (TestBlockWith ptype) +deriving stock instance Generic (Ticked1 (LedgerState (TestBlockWith ptype)) mk) +deriving anyclass instance (NoThunksMK mk, NoThunks (PayloadDependentState ptype mk)) + => NoThunks (Ticked1 (LedgerState (TestBlockWith ptype)) mk) + +testInitExtLedgerWithState :: + PayloadDependentState ptype mk -> ExtLedgerState (TestBlockWith ptype) mk testInitExtLedgerWithState st = ExtLedgerState { ledgerState = testInitLedgerWithState st , headerState = genesisHeaderState () @@ -564,7 +614,7 @@ type instance LedgerCfg (LedgerState (TestBlockWith ptype)) = TestBlockLedgerCon instance GetTip (LedgerState (TestBlockWith ptype)) where getTip = castPoint . lastAppliedPoint -instance GetTip (Ticked (LedgerState (TestBlockWith ptype))) where +instance GetTip (Ticked1 (LedgerState (TestBlockWith ptype))) where getTip = castPoint . lastAppliedPoint . getTickedTestLedger instance PayloadSemantics ptype => IsLedger (LedgerState (TestBlockWith ptype)) where @@ -573,7 +623,9 @@ instance PayloadSemantics ptype => IsLedger (LedgerState (TestBlockWith ptype)) type AuxLedgerEvent (LedgerState (TestBlockWith ptype)) = VoidLedgerEvent (LedgerState (TestBlockWith ptype)) - applyChainTickLedgerResult _ _ = pureLedgerResult . TickedTestLedger + applyChainTickLedgerResult _ _ = pureLedgerResult + . TickedTestLedger + . 
noNewTickingDiffs instance PayloadSemantics ptype => UpdateLedger (TestBlockWith ptype) @@ -649,27 +701,32 @@ instance HasHardForkHistory TestBlock where type HardForkIndices TestBlock = '[TestBlock] hardForkSummary = neverForksHardForkSummary tblcHardForkParams -data instance BlockQuery TestBlock result where - QueryLedgerTip :: BlockQuery TestBlock (Point TestBlock) +data instance BlockQuery TestBlock fp result where + QueryLedgerTip :: BlockQuery TestBlock QFNoTables (Point TestBlock) instance BlockSupportsLedgerQuery TestBlock where - answerBlockQuery _cfg QueryLedgerTip (ExtLedgerState TestLedger { lastAppliedPoint } _) = - lastAppliedPoint + answerPureBlockQuery _cfg QueryLedgerTip dlv = + let + TestLedger{ lastAppliedPoint } = ledgerState dlv + in + lastAppliedPoint + answerBlockQueryLookup _cfg q = case q of {} + answerBlockQueryTraverse _cfg q = case q of {} -instance SameDepIndex (BlockQuery TestBlock) where - sameDepIndex QueryLedgerTip QueryLedgerTip = Just Refl +instance SameDepIndex2 (BlockQuery TestBlock) where + sameDepIndex2 QueryLedgerTip QueryLedgerTip = Just Refl -deriving instance Eq (BlockQuery TestBlock result) -deriving instance Show (BlockQuery TestBlock result) +deriving instance Eq (BlockQuery TestBlock fp result) +deriving instance Show (BlockQuery TestBlock fp result) -instance ShowQuery (BlockQuery TestBlock) where +instance ShowQuery (BlockQuery TestBlock fp) where showResult QueryLedgerTip = show -testInitLedger :: LedgerState TestBlock -testInitLedger = testInitLedgerWithState () +testInitLedger :: LedgerState TestBlock ValuesMK +testInitLedger = testInitLedgerWithState EmptyPLDS -testInitExtLedger :: ExtLedgerState TestBlock -testInitExtLedger = testInitExtLedgerWithState () +testInitExtLedger :: ExtLedgerState TestBlock ValuesMK +testInitExtLedger = testInitExtLedgerWithState EmptyPLDS -- | Trivial test configuration with a single core node singleNodeTestConfig :: TopLevelConfig TestBlock @@ -840,7 +897,7 @@ instance Serialise (AnnTip (TestBlockWith ptype)) where encode = defaultEncodeAnnTip encode decode = defaultDecodeAnnTip decode -instance PayloadSemantics ptype => Serialise (ExtLedgerState (TestBlockWith ptype)) where +instance PayloadSemantics ptype => Serialise (ExtLedgerState (TestBlockWith ptype) EmptyMK) where encode = encodeExtLedgerState encode encode encode decode = decodeExtLedgerState decode decode decode @@ -881,8 +938,8 @@ instance DecodeDisk (TestBlockWith ptype) (AnnTip (TestBlockWith ptype)) instance ReconstructNestedCtxt Header (TestBlockWith ptype) -instance PayloadSemantics ptype => EncodeDisk (TestBlockWith ptype) (LedgerState (TestBlockWith ptype)) -instance PayloadSemantics ptype => DecodeDisk (TestBlockWith ptype) (LedgerState (TestBlockWith ptype)) +instance PayloadSemantics ptype => EncodeDisk (TestBlockWith ptype) (LedgerState (TestBlockWith ptype) EmptyMK) +instance PayloadSemantics ptype => DecodeDisk (TestBlockWith ptype) (LedgerState (TestBlockWith ptype) EmptyMK) instance Serialise ptype => EncodeDiskDep (NestedCtxt Header) (TestBlockWith ptype) instance Serialise ptype => DecodeDiskDep (NestedCtxt Header) (TestBlockWith ptype) diff --git a/ouroboros-consensus/src/unstable-mempool-test-utils/Test/Consensus/Mempool/Mocked.hs b/ouroboros-consensus/src/unstable-mempool-test-utils/Test/Consensus/Mempool/Mocked.hs index f0c2894650..2fa40d340b 100644 --- a/ouroboros-consensus/src/unstable-mempool-test-utils/Test/Consensus/Mempool/Mocked.hs +++ 
b/ouroboros-consensus/src/unstable-mempool-test-utils/Test/Consensus/Mempool/Mocked.hs @@ -16,13 +16,18 @@ module Test.Consensus.Mempool.Mocked ( ) where import Control.Concurrent.Class.MonadSTM.Strict (StrictTVar, - atomically, newTVarIO, readTVar, writeTVar) + atomically, newTVarIO, readTVar, readTVarIO, writeTVar) import Control.DeepSeq (NFData (rnf)) import Control.Tracer (Tracer) +import Data.Foldable (Foldable (foldMap')) +import qualified Data.List.NonEmpty as NE +import Ouroboros.Consensus.Block (castPoint) import Ouroboros.Consensus.HeaderValidation as Header -import Ouroboros.Consensus.Ledger.Basics (LedgerState) +import Ouroboros.Consensus.Ledger.Basics import qualified Ouroboros.Consensus.Ledger.Basics as Ledger import qualified Ouroboros.Consensus.Ledger.SupportsMempool as Ledger +import Ouroboros.Consensus.Ledger.Tables.Utils (forgetLedgerTables, + restrictValues') import Ouroboros.Consensus.Mempool (Mempool) import qualified Ouroboros.Consensus.Mempool as Mempool import Ouroboros.Consensus.Mempool.API (AddTxOnBehalfOf, @@ -30,7 +35,7 @@ import Ouroboros.Consensus.Mempool.API (AddTxOnBehalfOf, data MockedMempool m blk = MockedMempool { getLedgerInterface :: !(Mempool.LedgerInterface m blk) - , getLedgerStateTVar :: !(StrictTVar m (LedgerState blk)) + , getLedgerStateTVar :: !(StrictTVar m (LedgerState blk ValuesMK)) , getMempool :: !(Mempool m blk) } @@ -47,7 +52,7 @@ instance NFData (MockedMempool m blk) where data InitialMempoolAndModelParams blk = MempoolAndModelParams { -- | Initial ledger state for the mocked Ledger DB interface. - immpInitialState :: !(Ledger.LedgerState blk) + immpInitialState :: !(Ledger.LedgerState blk ValuesMK) -- | Ledger configuration, which is needed to open the mempool. , immpLedgerConfig :: !(Ledger.LedgerConfig blk) } @@ -64,7 +69,13 @@ openMockedMempool :: openMockedMempool capacityOverride tracer initialParams = do currentLedgerStateTVar <- newTVarIO (immpInitialState initialParams) let ledgerItf = Mempool.LedgerInterface { - Mempool.getCurrentLedgerState = readTVar currentLedgerStateTVar + Mempool.getCurrentLedgerState = forgetLedgerTables <$> readTVar currentLedgerStateTVar + , Mempool.getLedgerTablesAtFor = \pt txs -> do + let keys = foldMap' Ledger.getTransactionKeySets txs + st <- readTVarIO currentLedgerStateTVar + if castPoint (getTip st) == pt + then pure $ Just $ restrictValues' st keys + else pure Nothing } mempool <- Mempool.openMempoolWithoutSyncThread ledgerItf @@ -79,7 +90,7 @@ openMockedMempool capacityOverride tracer initialParams = do setLedgerState :: MockedMempool IO blk - -> LedgerState blk + -> LedgerState blk ValuesMK -> IO () setLedgerState MockedMempool {getLedgerStateTVar} newSt = atomically $ writeTVar getLedgerStateTVar newSt @@ -93,7 +104,7 @@ addTx = Mempool.addTx . getMempool removeTxs :: MockedMempool m blk - -> [Ledger.GenTxId blk] + -> NE.NonEmpty (Ledger.GenTxId blk) -> m () removeTxs = Mempool.removeTxs . 
getMempool diff --git a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Block.hs b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Block.hs index f6db8e90cf..336d8cba7c 100644 --- a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Block.hs +++ b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Block.hs @@ -2,12 +2,14 @@ {-# LANGUAGE DeriveAnyClass #-} {-# LANGUAGE DeriveGeneric #-} {-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE EmptyCase #-} {-# LANGUAGE EmptyDataDeriving #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} {-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} @@ -42,7 +44,9 @@ module Ouroboros.Consensus.Mock.Ledger.Block ( , MockProtocolSpecific (..) -- * 'UpdateLedger' , LedgerState (..) + , LedgerTables (..) , Ticked (..) + , Ticked1 (..) , genesisSimpleLedgerState , updateSimpleLedgerState -- * 'ApplyTx' (mempool support) @@ -71,6 +75,7 @@ import qualified Codec.CBOR.Encoding as CBOR import Codec.Serialise (Serialise (..), serialise) import Control.Monad.Except import qualified Data.ByteString.Lazy as Lazy +import Data.Foldable (foldMap') import Data.Kind (Type) import Data.Proxy import Data.Typeable @@ -89,13 +94,13 @@ import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Ledger.SupportsPeerSelection +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mock.Ledger.Address import Ouroboros.Consensus.Mock.Ledger.State import qualified Ouroboros.Consensus.Mock.Ledger.UTxO as Mock import Ouroboros.Consensus.Storage.Common (BinaryBlockInfo (..), SizeInBytes) -import Ouroboros.Consensus.Util (ShowProxy (..), hashFromBytesShortE, - (..:)) +import Ouroboros.Consensus.Util (ShowProxy (..), hashFromBytesShortE) import Ouroboros.Consensus.Util.Condense import Test.Util.Orphans.Serialise () @@ -340,22 +345,34 @@ deriving instance NoThunks (MockLedgerConfig c ext) type instance LedgerCfg (LedgerState (SimpleBlock c ext)) = SimpleLedgerConfig c ext instance GetTip (LedgerState (SimpleBlock c ext)) where - getTip (SimpleLedgerState st) = castPoint $ mockTip st + getTip (SimpleLedgerState st _) = castPoint $ mockTip st -instance GetTip (Ticked (LedgerState (SimpleBlock c ext))) where +instance GetTip (Ticked1 (LedgerState (SimpleBlock c ext))) where getTip = castPoint . getTip . getTickedSimpleLedgerState instance MockProtocolSpecific c ext => IsLedger (LedgerState (SimpleBlock c ext)) where type LedgerErr (LedgerState (SimpleBlock c ext)) = MockError (SimpleBlock c ext) - type AuxLedgerEvent (LedgerState (SimpleBlock c ext)) = VoidLedgerEvent (SimpleBlock c ext) + type AuxLedgerEvent (LedgerState (SimpleBlock c ext)) = VoidLedgerEvent (LedgerState (SimpleBlock c ext)) - applyChainTickLedgerResult _ _ = pureLedgerResult . TickedSimpleLedgerState + applyChainTickLedgerResult _ _ = pureLedgerResult + . TickedSimpleLedgerState + . flip SimpleLedgerState emptyLedgerTables + . simpleLedgerState instance MockProtocolSpecific c ext => ApplyBlock (LedgerState (SimpleBlock c ext)) (SimpleBlock c ext) where - applyBlockLedgerResult = fmap pureLedgerResult ..: updateSimpleLedgerState + applyBlockLedgerResult a blk st = + fmap ( pureLedgerResult + . 
forgetTrackingValues + . calculateDifference st + . unstowLedgerTables + ) + . updateSimpleLedgerState a blk + . TickedSimpleLedgerState + . stowLedgerTables + $ getTickedSimpleLedgerState st reapplyBlockLedgerResult = (mustSucceed . runExcept) ..: applyBlockLedgerResult @@ -363,46 +380,75 @@ instance MockProtocolSpecific c ext mustSucceed (Left err) = error ("reapplyBlockLedgerResult: unexpected error: " <> show err) mustSucceed (Right st) = st -newtype instance LedgerState (SimpleBlock c ext) = SimpleLedgerState { + getBlockKeySets SimpleBlock{simpleBody = SimpleBody txs} = + foldMap' id + [ LedgerTables $ KeysMK ins | Mock.Tx _ ins _ <- txs ] + +data instance LedgerState (SimpleBlock c ext) mk = SimpleLedgerState { simpleLedgerState :: MockState (SimpleBlock c ext) + , simpleLedgerTables :: LedgerTables (LedgerState (SimpleBlock c ext)) mk } - deriving stock (Generic, Show, Eq) - deriving newtype (Serialise, NoThunks) + deriving stock (Generic) + +deriving instance ( SimpleCrypto c + , Typeable ext + , Eq (mk Mock.TxIn Mock.TxOut) + ) + => Eq (LedgerState (SimpleBlock c ext) mk) +deriving instance ( SimpleCrypto c + , Typeable ext + , NoThunks (mk Mock.TxIn Mock.TxOut) + ) + => NoThunks (LedgerState (SimpleBlock c ext) mk) +deriving instance ( SimpleCrypto c + , Typeable ext + , Show (mk Mock.TxIn Mock.TxOut) + ) + => Show (LedgerState (SimpleBlock c ext) mk) -- Ticking has no effect on the simple ledger state -newtype instance Ticked (LedgerState (SimpleBlock c ext)) = TickedSimpleLedgerState { - getTickedSimpleLedgerState :: LedgerState (SimpleBlock c ext) +newtype instance Ticked1 (LedgerState (SimpleBlock c ext)) mk = TickedSimpleLedgerState { + getTickedSimpleLedgerState :: LedgerState (SimpleBlock c ext) mk } - deriving stock (Generic, Show, Eq) - deriving newtype (NoThunks) + deriving (Generic) + +deriving anyclass instance ( SimpleCrypto c + , Typeable ext + ) + => NoThunks (Ticked1 (LedgerState (SimpleBlock c ext)) TrackingMK) +deriving instance ( SimpleCrypto c + , Typeable ext + , Show (LedgerState (SimpleBlock c ext) mk) + ) + => Show (Ticked1 (LedgerState (SimpleBlock c ext)) mk) instance MockProtocolSpecific c ext => UpdateLedger (SimpleBlock c ext) updateSimpleLedgerState :: (SimpleCrypto c, Typeable ext) => LedgerConfig (SimpleBlock c ext) -> SimpleBlock c ext - -> TickedLedgerState (SimpleBlock c ext) + -> TickedLedgerState (SimpleBlock c ext) mk1 -> Except (MockError (SimpleBlock c ext)) - (LedgerState (SimpleBlock c ext)) -updateSimpleLedgerState cfg b (TickedSimpleLedgerState (SimpleLedgerState st)) = - SimpleLedgerState <$> updateMockState mockCfg b st - where - mockCfg = simpleLedgerMockConfig cfg + (LedgerState (SimpleBlock c ext) mk1) +updateSimpleLedgerState cfg b (TickedSimpleLedgerState (SimpleLedgerState st tbs)) = + flip SimpleLedgerState tbs <$> updateMockState (simpleLedgerMockConfig cfg) b st updateSimpleUTxO :: Mock.HasMockTxs a => LedgerConfig (SimpleBlock c ext) -> SlotNo -> a - -> TickedLedgerState (SimpleBlock c ext) + -> TickedLedgerState (SimpleBlock c ext) EmptyMK -> Except (MockError (SimpleBlock c ext)) - (TickedLedgerState (SimpleBlock c ext)) -updateSimpleUTxO cfg x slot (TickedSimpleLedgerState (SimpleLedgerState st)) = - TickedSimpleLedgerState . SimpleLedgerState <$> updateMockUTxO mockCfg x slot st - where - mockCfg = simpleLedgerMockConfig cfg + (TickedLedgerState (SimpleBlock c ext) EmptyMK) +updateSimpleUTxO cfg slot x (TickedSimpleLedgerState (SimpleLedgerState st tbs)) = + TickedSimpleLedgerState . 
flip SimpleLedgerState tbs + <$> updateMockUTxO (simpleLedgerMockConfig cfg) slot x st -genesisSimpleLedgerState :: AddrDist -> LedgerState (SimpleBlock c ext) -genesisSimpleLedgerState = SimpleLedgerState . genesisMockState +genesisSimpleLedgerState :: AddrDist -> LedgerState (SimpleBlock c ext) ValuesMK +genesisSimpleLedgerState = + unstowLedgerTables + . flip SimpleLedgerState emptyLedgerTables + . genesisMockState -- | Dummy values instance MockProtocolSpecific c ext => CommonProtocolParams (SimpleBlock c ext) where @@ -412,6 +458,51 @@ instance MockProtocolSpecific c ext => CommonProtocolParams (SimpleBlock c ext) instance LedgerSupportsPeerSelection (SimpleBlock c ext) where getPeers = const [] +{------------------------------------------------------------------------------- + LedgerTables +-------------------------------------------------------------------------------} + +type instance Key (LedgerState (SimpleBlock c ext)) = Mock.TxIn +type instance Value (LedgerState (SimpleBlock c ext)) = Mock.TxOut + +instance HasLedgerTables (LedgerState (SimpleBlock c ext)) where + projectLedgerTables = simpleLedgerTables + withLedgerTables (SimpleLedgerState s _) = SimpleLedgerState s + +instance HasLedgerTables (Ticked1 (LedgerState (SimpleBlock c ext))) where + projectLedgerTables = castLedgerTables + . simpleLedgerTables + . getTickedSimpleLedgerState + withLedgerTables (TickedSimpleLedgerState st) tables = + TickedSimpleLedgerState $ withLedgerTables st $ castLedgerTables tables + +instance CanSerializeLedgerTables (LedgerState (SimpleBlock c ext)) + +instance CanStowLedgerTables (LedgerState (SimpleBlock c ext)) where + stowLedgerTables st = + SimpleLedgerState { + simpleLedgerState = simpleLedgerState { mockUtxo = m } + , simpleLedgerTables = emptyLedgerTables + } + where + SimpleLedgerState { + simpleLedgerState + , simpleLedgerTables = LedgerTables (ValuesMK m) + } = st + + unstowLedgerTables st = + SimpleLedgerState { + simpleLedgerState = simpleLedgerState { mockUtxo = mempty } + , simpleLedgerTables = + LedgerTables (ValuesMK (mockUtxo simpleLedgerState)) + } + where + SimpleLedgerState { + simpleLedgerState + } = st + +deriving newtype instance CanStowLedgerTables (Ticked1 (LedgerState (SimpleBlock c ext))) + {------------------------------------------------------------------------------- Support for the mempool -------------------------------------------------------------------------------} @@ -436,13 +527,21 @@ type instance ApplyTxErr (SimpleBlock c ext) = MockError (SimpleBlock c ext) instance MockProtocolSpecific c ext => LedgerSupportsMempool (SimpleBlock c ext) where applyTx cfg _wti slot tx st = do - st' <- updateSimpleUTxO cfg slot tx st - return (st', ValidatedSimpleGenTx tx) - reapplyTx cfg slot vtx st = - updateSimpleUTxO cfg slot (forgetValidatedSimpleGenTx vtx) st + let st' = stowLedgerTables st + st'' <- unstowLedgerTables + <$> updateSimpleUTxO cfg slot tx st' + return ( forgetTrackingValues $ calculateDifference st st'' + , ValidatedSimpleGenTx tx ) + + reapplyTx cfg slot vtx st = applyDiffs st . fst + <$> applyTx cfg DoNotIntervene slot (forgetValidatedSimpleGenTx vtx) st txForgetValidated = forgetValidatedSimpleGenTx + getTransactionKeySets tx = + let Mock.Tx _ ins _ = simpleGenTx tx + in LedgerTables $ KeysMK ins + instance TxLimits (SimpleBlock c ext) where type TxMeasure (SimpleBlock c ext) = IgnoringOverflow ByteSize32 @@ -506,28 +605,30 @@ genTxSize :: GenTx (SimpleBlock c ext) -> ByteSize32 genTxSize = txSize . 
simpleGenTx {------------------------------------------------------------------------------- - Support for QueryLedger + Support for BlockSupportsLedgerQuery -------------------------------------------------------------------------------} -data instance BlockQuery (SimpleBlock c ext) result where - QueryLedgerTip :: BlockQuery (SimpleBlock c ext) (Point (SimpleBlock c ext)) +data instance BlockQuery (SimpleBlock c ext) fp result where + QueryLedgerTip :: BlockQuery (SimpleBlock c ext) QFNoTables (Point (SimpleBlock c ext)) instance MockProtocolSpecific c ext => BlockSupportsLedgerQuery (SimpleBlock c ext) where - answerBlockQuery _cfg QueryLedgerTip = + answerPureBlockQuery _cfg QueryLedgerTip = castPoint . ledgerTipPoint . ledgerState + answerBlockQueryLookup _cfg q = case q of {} + answerBlockQueryTraverse _cfg q = case q of {} -instance SameDepIndex (BlockQuery (SimpleBlock c ext)) where - sameDepIndex QueryLedgerTip QueryLedgerTip = Just Refl +instance SameDepIndex2 (BlockQuery (SimpleBlock c ext)) where + sameDepIndex2 QueryLedgerTip QueryLedgerTip = Just Refl -deriving instance Show (BlockQuery (SimpleBlock c ext) result) +deriving instance Show (BlockQuery (SimpleBlock c ext) fp result) instance (Typeable c, Typeable ext) => ShowProxy (BlockQuery (SimpleBlock c ext)) where instance (SimpleCrypto c, Typeable ext) - => ShowQuery (BlockQuery (SimpleBlock c ext)) where + => ShowQuery (BlockQuery (SimpleBlock c ext) fp) where showResult QueryLedgerTip = show {------------------------------------------------------------------------------- diff --git a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Forge.hs b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Forge.hs index a233ebc7a9..a1609f5329 100644 --- a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Forge.hs +++ b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Ledger/Forge.hs @@ -36,14 +36,14 @@ newtype ForgeExt c ext = ForgeExt { -> SimpleBlock c ext } -forgeSimple :: forall c ext. +forgeSimple :: forall c ext mk. 
( SimpleCrypto c ) => ForgeExt c ext -> TopLevelConfig (SimpleBlock c ext) -> BlockNo -- ^ Current block number -> SlotNo -- ^ Current slot number - -> TickedLedgerState (SimpleBlock c ext) -- ^ Current ledger + -> TickedLedgerState (SimpleBlock c ext) mk -- ^ Current ledger -> [GenTx (SimpleBlock c ext)] -- ^ Txs to include -> IsLeader (BlockProtocol (SimpleBlock c ext)) -> SimpleBlock c ext diff --git a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Node/Serialisation.hs b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Node/Serialisation.hs index 63482abd63..476fcea081 100644 --- a/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Node/Serialisation.hs +++ b/ouroboros-consensus/src/unstable-mock-block/Ouroboros/Consensus/Mock/Node/Serialisation.hs @@ -19,6 +19,7 @@ import Ouroboros.Consensus.Block import Ouroboros.Consensus.HeaderValidation (AnnTip, defaultDecodeAnnTip, defaultEncodeAnnTip) import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Query import Ouroboros.Consensus.Ledger.SupportsMempool import Ouroboros.Consensus.Mock.Ledger import Ouroboros.Consensus.Mock.Node.Abstract @@ -50,8 +51,10 @@ instance Serialise ext => EncodeDisk (MockBlock ext) (Header (MockBlock ext)) instance Serialise ext => DecodeDisk (MockBlock ext) (Lazy.ByteString -> Header (MockBlock ext)) where decodeDisk _ = const <$> decode -instance EncodeDisk (MockBlock ext) (LedgerState (MockBlock ext)) -instance DecodeDisk (MockBlock ext) (LedgerState (MockBlock ext)) +instance EncodeDisk (MockBlock ext) (LedgerState (MockBlock ext) EmptyMK) where + encodeDisk _ = encode . simpleLedgerState +instance DecodeDisk (MockBlock ext) (LedgerState (MockBlock ext) EmptyMK) where + decodeDisk _ = flip SimpleLedgerState (LedgerTables EmptyMK) <$> decode instance EncodeDisk (MockBlock ext) (AnnTip (MockBlock ext)) where encodeDisk _ = defaultEncodeAnnTip encode @@ -106,13 +109,13 @@ instance SerialiseNodeToClient (MockBlock ext) (GenTxId (MockBlock ext)) instance SerialiseNodeToClient (MockBlock ext) (MockError (MockBlock ext)) instance SerialiseNodeToClient (MockBlock ext) SlotNo -instance SerialiseNodeToClient (MockBlock ext) (SomeSecond BlockQuery (MockBlock ext)) where - encodeNodeToClient _ _ (SomeSecond QueryLedgerTip) = encode () - decodeNodeToClient _ _ = (\() -> SomeSecond QueryLedgerTip) <$> decode +instance SerialiseNodeToClient (MockBlock ext) (SomeBlockQuery (BlockQuery (MockBlock ext))) where + encodeNodeToClient _ _ (SomeBlockQuery QueryLedgerTip) = encode () + decodeNodeToClient _ _ = (\() -> SomeBlockQuery QueryLedgerTip) <$> decode -instance SerialiseResult (MockBlock ext) (BlockQuery (MockBlock ext)) where - encodeResult _ _ QueryLedgerTip = encode - decodeResult _ _ QueryLedgerTip = decode +instance SerialiseResult' (MockBlock ext) BlockQuery where + encodeResult' _ _ QueryLedgerTip = encode + decodeResult' _ _ QueryLedgerTip = decode {------------------------------------------------------------------------------- Nested contents diff --git a/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/Simple.lhs b/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/Simple.lhs index 69af4e6f8d..3f013bcead 100644 --- a/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/Simple.lhs +++ b/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/Simple.lhs @@ -50,7 +50,7 @@ First, some imports we'll need: > HeaderHash, Point, StandardHash) > import 
Ouroboros.Consensus.Protocol.Abstract > (SecurityParam(..), ConsensusConfig, ConsensusProtocol(..) ) -> import Ouroboros.Consensus.Ticked ( Ticked(TickedTrivial) ) +> import Ouroboros.Consensus.Ticked ( Ticked1, Ticked(TickedTrivial) ) > import Ouroboros.Consensus.Block > (BlockSupportsProtocol (selectView, validateView)) > import Ouroboros.Consensus.Ledger.Abstract @@ -62,6 +62,7 @@ First, some imports we'll need: > import Ouroboros.Consensus.Forecast (trivialForecast) > import Ouroboros.Consensus.HeaderValidation > (ValidateEnvelope, BasicEnvelopeValidation, HasAnnTip) +> import Ouroboros.Consensus.Ledger.Tables Conceptual Overview and Definitions of Key Terms ================================================ @@ -499,6 +500,10 @@ state. Below we'll define a group of typeclasses that together implement a simple ledger that uses `BlockC` and that is suitable for our consensus protocol `SP`. +For this tutorial we will be ignoring the definitions related to UTxO-HD. In +particular one can ignore type variables named `mk`, types of the form `*MK`, +and anything mentioning to tables or `KeySets`. There is an appendix at the end +of this document that briefly outlines UTxO-HD. `LedgerCfg` - Ledger Static Configuration ----------------------------------------- @@ -522,7 +527,7 @@ Given that the `BlockC` transactions consist of incrementing and decrementing a number, we materialize that number in the `LedgerState`. We'll also need to keep track of some information about the most recent block we have seen. -> data instance LedgerState BlockC = +> data instance LedgerState BlockC mk = > > LedgerC > -- the hash and slot number of the most recent block @@ -539,13 +544,13 @@ place in the blockchain - a pair of a slot and a block hash. --------------------------------------- Again, the slot abstraction defines a logical clock - and instances of the -`Ticked` family describe values that evolve with respect to this logical clock. -As such, we will also need to define an instance of `Ticked` for our ledger +`Ticked1` family describe values that evolve with respect to this logical clock. +As such, we will also need to define an instance of `Ticked1` for our ledger state. In our example, this is essentially an `Identity` functor: -> newtype instance Ticked (LedgerState BlockC) = +> newtype instance Ticked1 (LedgerState BlockC) mk = > TickedLedgerStateC -> { unTickedLedgerStateC :: LedgerState BlockC } +> { unTickedLedgerStateC :: LedgerState BlockC mk } > deriving (Show, Eq, Generic, Serialise) @@ -561,7 +566,7 @@ types for a ledger. Though we are here using > > applyChainTickLedgerResult _cfg _slot ldgrSt = > LedgerResult { lrEvents = [] -> , lrResult = TickedLedgerStateC ldgrSt +> , lrResult = TickedLedgerStateC $ convertMapKind ldgrSt > } The `LedgerErr` type is the type of errors associated with this ledger that can @@ -587,7 +592,7 @@ A block `b` is said to have been `applied` to a `LedgerState` if that `LedgerState` is the result of having witnessed `b` at some point. 
We can express this as a function: -> applyBlockTo :: BlockC -> Ticked (LedgerState BlockC) -> LedgerState BlockC +> applyBlockTo :: BlockC -> Ticked1 (LedgerState BlockC) mk -> LedgerState BlockC mk > applyBlockTo block tickedLedgerState = > ledgerState { lsbc_tip = blockPoint block > , lsbc_count = lsbc_count' @@ -600,7 +605,7 @@ express this as a function: > Inc -> i + 1 > Dec -> i - 1 -We use a `Ticked (LedgerState BlockC)` to enforce the invariant that we should +We use a `Ticked1 (LedgerState BlockC)` to enforce the invariant that we should not apply two blocks in a row - at least one tick (aka slot) must have elapsed between block applications. @@ -611,14 +616,16 @@ the `ApplyBlock` typeclass: > instance ApplyBlock (LedgerState BlockC) BlockC where > applyBlockLedgerResult _ldgrCfg block tickedLdgrSt = > pure $ LedgerResult { lrEvents = [] -> , lrResult = block `applyBlockTo` tickedLdgrSt +> , lrResult = convertMapKind $ block `applyBlockTo` tickedLdgrSt > } > > reapplyBlockLedgerResult _ldgrCfg block tickedLdgrSt = > LedgerResult { lrEvents = [] -> , lrResult = block `applyBlockTo` tickedLdgrSt +> , lrResult = convertMapKind $ block `applyBlockTo` tickedLdgrSt > } > +> getBlockKeySets = const trivialLedgerTables +> > `applyBlockLedgerResult` tries to apply a block to the ledger and fails with a @@ -654,7 +661,7 @@ The `GetTip` typeclass describes how to get the `Point` of the tip - which is the most recently applied block. We need to implement this both for `LedgerState BlockC` as well as its ticked version: -> instance GetTip (Ticked (LedgerState BlockC)) where +> instance GetTip (Ticked1 (LedgerState BlockC)) where > getTip = castPoint . lsbc_tip . unTickedLedgerStateC > instance GetTip (LedgerState BlockC) where @@ -703,6 +710,41 @@ To focus on the salient ideas of this document, we've put all the derivations of > instance NoThunks BlockC > deriving via OnlyCheckWhnfNamed "HdrBlockC" (Header BlockC) > instance NoThunks (Header BlockC) -> deriving via OnlyCheckWhnfNamed "LedgerC" (LedgerState BlockC) -> instance NoThunks (LedgerState BlockC) -> deriving instance NoThunks (Ticked (LedgerState BlockC)) +> deriving via OnlyCheckWhnfNamed "LedgerC" (LedgerState BlockC mk) +> instance NoThunks (LedgerState BlockC mk) + +Appendix: UTxO-HD features +========================== + +The introduction of UTxO-HD is out of the scope of this tutorial but we will +describe here a few hints on how it would be defined. In broad terms, with the +introduction of UTxO-HD a part of the ledger state (the UTxO set) was moved to +the disk and now consensus: + +- provides subsets of that data to the ledger rules (i.e. only the consumed + UTxOs on a block) + +- stores a sequence of deltas (diffs) produced by the execution of the ledger + rules + +These subsets are defined in terms of the `LedgerTables` and the `mk` type +variable that indicates if the collection is made of key-value pairs, only keys +or to keys-delta pairs. + +The `HasLedgerTables` class defines the basic operations that can be done with +the `LedgerTables`. 
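+Its two essential operations are `projectLedgerTables`, which extracts the
+tables from a ledger state, and `withLedgerTables`, which replaces them. As a
+rough sketch (not compiled as part of this tutorial), a block that did keep a
+UTxO set in its tables, say a hypothetical `BlockU` whose ledger state had a
+field `lsbu_utxo :: LedgerTables (LedgerState BlockU) mk` over hypothetical
+`TxIn` and `TxOut` types, could instantiate the class along these lines:
+
+    type instance Key   (LedgerState BlockU) = TxIn
+    type instance Value (LedgerState BlockU) = TxOut
+
+    instance HasLedgerTables (LedgerState BlockU) where
+      projectLedgerTables st        = lsbu_utxo st
+      withLedgerTables    st tables = st { lsbu_utxo = tables }
+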
For a Ledger state definition as simple as the one we are +defining there the tables are trivially empty so the operations are all trivial +and we use the default implementation + +> type instance Key (LedgerState BlockC) = Void +> type instance Value (LedgerState BlockC) = Void +> +> instance HasLedgerTables (LedgerState BlockC) +> instance HasLedgerTables (Ticked1 (LedgerState BlockC)) +> instance CanSerializeLedgerTables (LedgerState BlockC) +> instance CanStowLedgerTables (LedgerState BlockC) +> instance LedgerTablesAreTrivial (LedgerState BlockC) where +> convertMapKind (LedgerC x y) = LedgerC x y +> instance LedgerTablesAreTrivial (Ticked1 (LedgerState BlockC)) where +> convertMapKind (TickedLedgerStateC x) = +> TickedLedgerStateC (convertMapKind x) diff --git a/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/WithEpoch.lhs b/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/WithEpoch.lhs index bc8345b871..18dfde23ed 100644 --- a/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/WithEpoch.lhs +++ b/ouroboros-consensus/src/unstable-tutorials/Ouroboros/Consensus/Tutorial/WithEpoch.lhs @@ -56,6 +56,7 @@ And imports, of course: > import Control.Monad () > import Control.Monad.Except (MonadError (throwError)) +> import Data.Void (Void) > import Data.Word (Word64) > import GHC.Generics (Generic) > import NoThunks.Class (NoThunks, OnlyCheckWhnfNamed (..)) @@ -74,7 +75,7 @@ And imports, of course: > import Ouroboros.Consensus.Protocol.Abstract > (ConsensusConfig, SecurityParam, ConsensusProtocol (..)) > -> import Ouroboros.Consensus.Ticked (Ticked) +> import Ouroboros.Consensus.Ticked (Ticked1, Ticked) > import Ouroboros.Consensus.Ledger.Abstract > (LedgerState, LedgerCfg, GetTip, LedgerResult (..), ApplyBlock (..), > UpdateLedger, IsLedger (..)) @@ -88,6 +89,7 @@ And imports, of course: > import Ouroboros.Consensus.Forecast > (Forecast (..), OutsideForecastRange (..)) > import Ouroboros.Consensus.Ledger.Basics (GetTip(..)) +> import Ouroboros.Consensus.Ledger.Tables Epochs @@ -294,7 +296,7 @@ corresponding to `BlockD` needs to hold snapshots of the count at the last two epoch boundaries - this is the `lsbd_snapshot1` and `lsbd_snapshot2` fields below: -> data instance LedgerState BlockD = +> data instance LedgerState BlockD mk = > LedgerD > { lsbd_tip :: Point BlockD -- ^ Point of the last applied block. > -- (Point is header hash and slot no.) @@ -314,7 +316,7 @@ There is no interesting static configuration for this ledger: Our `GetTip` implementation retrieves the tip from the `lsbd_tip` field: -> instance GetTip (Ticked (LedgerState BlockD)) where +> instance GetTip (Ticked1 (LedgerState BlockD)) where > getTip = castPoint . lsbd_tip . 
unTickedLedgerStateD > instance GetTip (LedgerState BlockD) where @@ -323,12 +325,12 @@ Our `GetTip` implementation retrieves the tip from the `lsbd_tip` field: Ticking ------- -`LedgerState BlockD` also needs a corresponding `Ticked` instance which is still +`LedgerState BlockD` also needs a corresponding `Ticked1` instance which is still very simple: -> newtype instance Ticked (LedgerState BlockD) = +> newtype instance Ticked1 (LedgerState BlockD) mk = > TickedLedgerStateD { -> unTickedLedgerStateD :: LedgerState BlockD +> unTickedLedgerStateD :: LedgerState BlockD mk > } > deriving stock (Show, Eq, Generic) > deriving newtype (NoThunks, Serialise) @@ -336,12 +338,12 @@ very simple: Because the ledger now needs to track the snapshots in `lsbd_snapshot1` and `lsbd_snapshot2` we can express this in terms of ticking a `LedgerState BlockD`. We'll write a function (that we'll use later) to express this relationship -computing the `Ticked (LedgerState BlockD)` resulting from a starting +computing the `Ticked1 (LedgerState BlockD)` resulting from a starting `LedgerState BlockD` being ticked to some slot in the future - assuming no intervening blocks are applied: > tickLedgerStateD :: -> SlotNo -> LedgerState BlockD -> Ticked (LedgerState BlockD) +> SlotNo -> LedgerState BlockD mk -> Ticked1 (LedgerState BlockD) mk > tickLedgerStateD newSlot ldgrSt = > TickedLedgerStateD $ > if isNewEpoch then @@ -376,7 +378,7 @@ We can now use `tickLedgerStateD` to instantiate `IsLedger`: > > applyChainTickLedgerResult _cfg slot ldgrSt = > LedgerResult { lrEvents = [] -> , lrResult = tickLedgerStateD slot ldgrSt +> , lrResult = tickLedgerStateD slot $ convertMapKind ldgrSt > } `UpdateLedger` is necessary but its implementation is always empty: @@ -386,10 +388,10 @@ We can now use `tickLedgerStateD` to instantiate `IsLedger`: Applying Blocks --------------- -Applying a `BlockD` to a `Ticked (LedgerState BlockD)` is (again) the result of +Applying a `BlockD` to a `Ticked1 (LedgerState BlockD)` is (again) the result of applying each individual transaction - exactly as it was in for `BlockC`: -> applyBlockTo :: BlockD -> Ticked (LedgerState BlockD) -> LedgerState BlockD +> applyBlockTo :: BlockD -> Ticked1 (LedgerState BlockD) mk -> LedgerState BlockD mk > applyBlockTo block tickedLedgerState = > ledgerState { lsbd_tip = blockPoint block > , lsbd_count = lsbc_count' @@ -404,14 +406,16 @@ applying each individual transaction - exactly as it was in for `BlockC`: > instance ApplyBlock (LedgerState BlockD) BlockD where > applyBlockLedgerResult _ldgrCfg b tickedLdgrSt = -> pure LedgerResult { lrResult = b `applyBlockTo` tickedLdgrSt +> pure LedgerResult { lrResult = convertMapKind $ b `applyBlockTo` tickedLdgrSt > , lrEvents = [] > } > > reapplyBlockLedgerResult _ldgrCfg b tickedLdgrSt = -> LedgerResult { lrResult = b `applyBlockTo` tickedLdgrSt +> LedgerResult { lrResult = convertMapKind $ b `applyBlockTo` tickedLdgrSt > , lrEvents = [] > } +> +> getBlockKeySets = const trivialLedgerTables Note that prior to `applyBlockLedgerResult` being invoked, the calling code will have already established that the header is valid and that the header matches @@ -482,7 +486,7 @@ specific to `PrtclD`: > data ChainDepStateD = ChainDepStateD > deriving (Eq,Show,Generic,NoThunks) -However, the `Ticked` representation contains the `LedgerViewD` containing the +However, the `Ticked1` representation contains the `LedgerViewD` containing the epoch snapshot. 
This is due to functions for `ConsensusProtocol` only taking the `LedgerView` as an argument in some cases: @@ -665,3 +669,22 @@ involving `BlockC`: While this is a large ecosystem of interrelated typeclasses and families, the overall organization of things is such that Haskell's type checking can help guide the implementation. + +Appendix: UTxO-HD features +========================== + +For reference on these instances and their meaning, please see the appendix in +[the Simple tutorial](./Simple.lhs). + +> type instance Key (LedgerState BlockD) = Void +> type instance Value (LedgerState BlockD) = Void +> +> instance HasLedgerTables (LedgerState BlockD) +> instance HasLedgerTables (Ticked1 (LedgerState BlockD)) +> instance CanSerializeLedgerTables (LedgerState BlockD) +> instance CanStowLedgerTables (LedgerState BlockD) +> instance LedgerTablesAreTrivial (LedgerState BlockD) where +> convertMapKind (LedgerD x y z z') = LedgerD x y z z' +> instance LedgerTablesAreTrivial (Ticked1 (LedgerState BlockD)) where +> convertMapKind (TickedLedgerStateD x) = +> TickedLedgerStateD (convertMapKind x) diff --git a/ouroboros-consensus/test/consensus-test/Main.hs b/ouroboros-consensus/test/consensus-test/Main.hs index e609007823..72a58a1ffc 100644 --- a/ouroboros-consensus/test/consensus-test/Main.hs +++ b/ouroboros-consensus/test/consensus-test/Main.hs @@ -5,8 +5,11 @@ import qualified Test.Consensus.HardFork.Forecast (tests) import qualified Test.Consensus.HardFork.History (tests) import qualified Test.Consensus.HardFork.Summary (tests) import qualified Test.Consensus.HeaderValidation (tests) +import qualified Test.Consensus.Ledger.Tables.Diff (tests) +import qualified Test.Consensus.Ledger.Tables.DiffSeq (tests) import qualified Test.Consensus.Mempool (tests) import qualified Test.Consensus.Mempool.Fairness (tests) +import qualified Test.Consensus.Mempool.StateMachine (tests) import qualified Test.Consensus.MiniProtocol.BlockFetch.Client (tests) import qualified Test.Consensus.MiniProtocol.ChainSync.Client (tests) import qualified Test.Consensus.MiniProtocol.LocalStateQuery.Server (tests) @@ -27,8 +30,11 @@ tests = , Test.Consensus.MiniProtocol.BlockFetch.Client.tests , Test.Consensus.MiniProtocol.ChainSync.Client.tests , Test.Consensus.MiniProtocol.LocalStateQuery.Server.tests - , Test.Consensus.Mempool.tests - , Test.Consensus.Mempool.Fairness.tests + , testGroup "Mempool" + [ Test.Consensus.Mempool.tests + , Test.Consensus.Mempool.Fairness.tests + , Test.Consensus.Mempool.StateMachine.tests + ] , Test.Consensus.Util.MonadSTM.NormalForm.tests , Test.Consensus.Util.Versioned.tests , testGroup "HardFork" [ @@ -40,4 +46,6 @@ tests = Test.Consensus.HardFork.Forecast.tests ] ] + , Test.Consensus.Ledger.Tables.Diff.tests + , Test.Consensus.Ledger.Tables.DiffSeq.tests ] diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/BlockchainTime/Simple.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/BlockchainTime/Simple.hs index 24ea723354..30be744509 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/BlockchainTime/Simple.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/BlockchainTime/Simple.hs @@ -41,6 +41,7 @@ import Control.Applicative (Alternative (..)) import qualified Control.Concurrent.Class.MonadMVar.Strict as Strict import qualified Control.Concurrent.Class.MonadSTM.Strict as Strict import Control.Monad (MonadPlus, when) +import Control.Monad.Class.MonadSay import qualified Control.Monad.Class.MonadSTM.Internal as LazySTM import 
Control.Monad.Class.MonadTime import qualified Control.Monad.Class.MonadTimer as MonadTimer @@ -598,6 +599,9 @@ instance (MonadAsync m, MonadMask m, MonadThrow (STM m)) => MonadAsync (Override waitCatchSTM = OverrideDelaySTM . lift . waitCatchSTM . unOverrideDelayAsync pollSTM = OverrideDelaySTM . lift . pollSTM . unOverrideDelayAsync +instance MonadSay m => MonadSay (OverrideDelay m) where + say = OverrideDelay . lift . say + instance (IOLike m, MonadDelay (OverrideDelay m)) => IOLike (OverrideDelay m) where forgetSignKeyKES = OverrideDelay . lift . forgetSignKeyKES diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/Forecast.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/Forecast.hs index e9f3117d0a..0a791f91d8 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/Forecast.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/Forecast.hs @@ -3,6 +3,7 @@ {-# LANGUAGE FlexibleInstances #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE PolyKinds #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} {-# LANGUAGE StandaloneDeriving #-} @@ -45,6 +46,7 @@ import Ouroboros.Consensus.HardFork.History (Bound (..), EraEnd (..), EraParams (..), EraSummary (..), Summary (..)) import qualified Ouroboros.Consensus.HardFork.History as History import Ouroboros.Consensus.HardFork.History.Util +import Ouroboros.Consensus.Ledger.Tables.Combinators (K2 (..)) import Ouroboros.Consensus.Util (Some (..), repeatedly, splits) import Test.Consensus.HardFork.Infra import Test.QuickCheck hiding (elements) @@ -221,13 +223,13 @@ withinEraForecast maxLookAhead st = Forecast{ -- | Translations between eras translations :: forall xs. TestSetup xs - -> InPairs (CrossEraForecaster (K LedgerState) (K LedgerView)) xs + -> InPairs (CrossEraForecaster (K2 LedgerState) (K LedgerView)) xs translations TestSetup{..} = case isNonEmpty (Proxy @xs) of ProofNonEmpty{} -> go testLookahead where go :: Exactly (x ': xs') MaxLookahead - -> InPairs (CrossEraForecaster (K LedgerState) (K LedgerView)) (x ': xs') + -> InPairs (CrossEraForecaster (K2 LedgerState) (K LedgerView)) (x ': xs') go (ExactlyCons _ ExactlyNil) = InPairs.PNil go (ExactlyCons this rest@(ExactlyCons next _)) = @@ -235,9 +237,9 @@ translations TestSetup{..} = tr :: MaxLookahead -- ^ Look-ahead in the current era -> MaxLookahead -- ^ Look-ahead in the next era - -> CrossEraForecaster (K LedgerState) (K LedgerView) era era' + -> CrossEraForecaster (K2 LedgerState) (K LedgerView) era era' tr thisLookahead nextLookahead = - CrossEraForecaster $ \transition sno (K st) -> + CrossEraForecaster $ \transition sno (K2 st) -> assert (sno >= boundSlot transition) $ do let tip :: WithOrigin SlotNo tip = ledgerTip st @@ -278,7 +280,7 @@ acrossErasForecast setup@TestSetup{..} ledgerStates = . 
hardForkLedgerViewPerEra go :: NonEmpty xs' TestEra - -> Telescope (K Past) (Current (AnnForecast (K LedgerState) (K LedgerView))) xs' + -> Telescope (K Past) (Current (AnnForecast (K2 LedgerState) (K LedgerView))) xs' go (NonEmptyOne era) = assert (testEraContains testForecastAt era) $ TZ $ Current { @@ -288,7 +290,7 @@ acrossErasForecast setup@TestSetup{..} ledgerStates = withinEraForecast (testEraMaxLookahead era) st - , annForecastState = K st + , annForecastState = K2 st , annForecastTip = testForecastAt , annForecastEnd = Nothing } @@ -305,7 +307,7 @@ acrossErasForecast setup@TestSetup{..} ledgerStates = withinEraForecast (testEraMaxLookahead era) st - , annForecastState = K st + , annForecastState = K2 st , annForecastTip = testForecastAt , annForecastEnd = Just end } diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/History.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/History.hs index 5d35b1f68b..85bb9ea982 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/History.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/HardFork/History.hs @@ -51,6 +51,7 @@ import Ouroboros.Consensus.HardFork.Combinator.Protocol.LedgerView import qualified Ouroboros.Consensus.HardFork.Combinator.State as State import Ouroboros.Consensus.HardFork.Combinator.State.Types import qualified Ouroboros.Consensus.HardFork.History as HF +import Ouroboros.Consensus.Ledger.Tables.Combinators import Ouroboros.Consensus.Util (nTimes) import Test.Cardano.Slotting.Numeric () import Test.Consensus.HardFork.Infra @@ -845,14 +846,14 @@ mockHardForkLedgerView = \(HF.Shape pss) (HF.Transitions ts) (Chain ess) -> -> Exactly (x ': xs) HF.EraParams -> AtMost xs EpochNo -> NonEmpty (x ': xs) [Event] - -> Telescope (K Past) (Current (AnnForecast (K ()) (K ()))) (x : xs) + -> Telescope (K Past) (Current (AnnForecast (K2 ()) (K ()))) (x : xs) mockState start (ExactlyCons ps _) ts (NonEmptyOne es) = TZ $ Current start $ AnnForecast { annForecast = Forecast { forecastAt = tip es -- forecast at tip of ledger , forecastFor = \_for -> return $ K () } - , annForecastState = K () + , annForecastState = K2 () , annForecastTip = tip es , annForecastEnd = HF.mkUpperBound ps start <$> atMostHead ts } diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/Diff.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/Diff.hs new file mode 100644 index 0000000000..ec2eb0e79a --- /dev/null +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/Diff.hs @@ -0,0 +1,120 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeApplications #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Consensus.Ledger.Tables.Diff (tests) where + +import Data.Foldable as F +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import qualified Data.Set as Set +import Data.Typeable +import Ouroboros.Consensus.Ledger.Tables.Diff +import Test.QuickCheck.Classes +import Test.Tasty (TestTree, testGroup) +import Test.Tasty.QuickCheck hiding (Negative, Positive) + +tests :: TestTree +tests = testGroup "Test.Consensus.Ledger.Tables.Diff" [ + testGroup "quickcheck-classes" [ + lawsTestOne (Proxy @(Diff K V)) [ + semigroupLaws + , monoidLaws + ] + ] + , testGroup "Applying 
diffs" [ + testProperty "prop_diffThenApply" prop_diffThenApply + , testProperty "prop_applyMempty" prop_applyMempty + , testProperty "prop_applySum" prop_applySum + , testProperty "prop_applyDiffNumInsertsDeletes" prop_applyDiffNumInsertsDeletes + , testProperty "prop_applyDiffNumInsertsDeletesExact" prop_applyDiffNumInsertsDeletesExact + ] + ] + +{------------------------------------------------------------------------------ + Running laws in test trees +------------------------------------------------------------------------------} + +lawsTest :: Laws -> TestTree +lawsTest Laws{lawsTypeclass, lawsProperties} = testGroup lawsTypeclass $ + fmap (uncurry testProperty) lawsProperties + +lawsTestOne :: Typeable a => Proxy a -> [Proxy a -> Laws] -> TestTree +lawsTestOne p tts = + testGroup (show $ typeOf p) (fmap (\f -> lawsTest $ f p) tts) + +{------------------------------------------------------------------------------ + Applying diffs +------------------------------------------------------------------------------} + +type K = Int +type V = Char + +-- | Applying a diff computed from a source and target value should +-- produce the target value. +prop_diffThenApply :: Map K V -> Map K V -> Property +prop_diffThenApply x y = applyDiff x (diff x y) === y + +-- | Applying an empty diff is the identity function. +prop_applyMempty :: Map K V -> Property +prop_applyMempty x = applyDiff x mempty === x + +-- | Applying a sum of diffs is equivalent to applying each @'Diff'@ +-- separately (in order). +prop_applySum :: Map K V -> [Diff K V] -> Property +prop_applySum x ds = F.foldl' applyDiff x ds === applyDiff x (foldMap' id ds) + +-- | Applying a @'Diff' d@ to a @'Map' m@ increases the size of @m@ by exactly +-- @numInserts d - numDeletes d@ if @d@ inserts only new keys and @d@ only +-- deletes existing keys. +-- +-- Diffing two 'Map's that have disjoint keysets creates exactly a diff @d@ that +-- only inserts new keys and deletes existing keys. +prop_applyDiffNumInsertsDeletesExact :: Map K V -> Map K V -> Property +prop_applyDiffNumInsertsDeletesExact m1 m2 = + Map.keysSet m1 `Set.disjoint` Map.keysSet m2 ==> + Map.size (applyDiff m1 d) === + Map.size m1 + numInserts d - numDeletes d + where + d = diff m1 m2 + +-- | Applying a @'Diff' d@ to a @'Map' m@ may increase/decrease the size of @m@ +-- up to bounds depending on the number of inserts and deletes in @d@. +-- +-- * The size of @m@ may /decrease/ by up to the number of deletes in @d@. This +-- happens if @d@ does not insert any new keys. +-- * The size of @m@ may /increase/ by up to the number of inserts in @d@. This +-- if @d@ does not delete any existing keys. 
+prop_applyDiffNumInsertsDeletes :: Map K V -> Diff K V -> Property +prop_applyDiffNumInsertsDeletes m d = property $ + lb <= n' && n' <= ub + where + n = Map.size m + nInserts = numInserts d + nDeletes = numDeletes d + n' = Map.size (applyDiff m d) + lb = n - nDeletes + ub = n + nInserts + +{------------------------------------------------------------------------------ + Plain @'Arbitrary'@ instances +------------------------------------------------------------------------------} + +deriving newtype instance (Ord k, Arbitrary k, Arbitrary v) + => Arbitrary (Diff k v) + +instance Arbitrary v => Arbitrary (Delta v) where + arbitrary = oneof [ + Insert <$> arbitrary + , pure Delete + ] + shrink de = case de of + Insert x -> Insert <$> shrink x + Delete -> [] diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/DiffSeq.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/DiffSeq.hs new file mode 100644 index 0000000000..ef94049f15 --- /dev/null +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Ledger/Tables/DiffSeq.hs @@ -0,0 +1,97 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeApplications #-} +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Consensus.Ledger.Tables.DiffSeq (tests) where + +import Control.Monad (liftM) +import qualified Data.FingerTree.RootMeasured.Strict as RMFT +import Data.Map.Diff.Strict (Delta (..), Diff) +import Data.Map.Diff.Strict.Internal (DeltaHistory (..), Diff (..)) +import Data.Maybe.Strict (StrictMaybe (..)) +import Data.Sequence.NonEmpty (NESeq (..)) +import Data.Typeable +import Ouroboros.Consensus.Ledger.Tables.DiffSeq +import qualified Ouroboros.Consensus.Ledger.Tables.DiffSeq as DS +import Test.QuickCheck.Classes +import Test.QuickCheck.Classes.Semigroup.Cancellative +import Test.Tasty +import Test.Tasty.QuickCheck +import Test.Util.Orphans.Arbitrary () + +tests :: TestTree +tests = testGroup "Test.Consensus.Ledger.Tables.DiffSeq" [ + lawsTestOne (Proxy @(RootMeasure Key Val)) [ + semigroupLaws + , monoidLaws + , leftReductiveLaws + , rightReductiveLaws + , leftCancellativeLaws + , rightCancellativeLaws + ] + , lawsTestOne (Proxy @(InternalMeasure Key Val)) [ + semigroupLaws + , monoidLaws + ] + ] + +type Key = Small Int +type Val = Small Int + +{------------------------------------------------------------------------------ + Running laws in test trees +------------------------------------------------------------------------------} + +lawsTest :: Laws -> TestTree +lawsTest Laws{lawsTypeclass, lawsProperties} = testGroup lawsTypeclass $ + fmap (uncurry testProperty) lawsProperties + +lawsTestOne :: Typeable a => Proxy a -> [Proxy a -> Laws] -> TestTree +lawsTestOne p tts = + testGroup (show $ typeOf p) (fmap (\f -> lawsTest $ f p) tts) + +{------------------------------------------------------------------------------ + Diffs +------------------------------------------------------------------------------} + +deriving newtype instance (Ord k, Arbitrary k, Arbitrary v) + => Arbitrary (Diff k v) + +instance (Arbitrary v) => Arbitrary (DeltaHistory v) where + arbitrary = DeltaHistory <$> + ((:<||) <$> arbitrary <*> arbitrary) + +instance (Arbitrary v) => Arbitrary (Delta v) where + arbitrary = oneof [ + Insert <$> arbitrary + , pure Delete + ] + +{------------------------------------------------------------------------------- + DiffSeq 
+-------------------------------------------------------------------------------} + +instance (RMFT.SuperMeasured vt vi a, Arbitrary a) + => Arbitrary (RMFT.StrictFingerTree vt vi a) where + arbitrary = RMFT.fromList <$> arbitrary + +instance (Ord k, Arbitrary k, Arbitrary v) + => Arbitrary (RootMeasure k v) where + arbitrary = RootMeasure <$> arbitrary <*> arbitrary + <*> arbitrary <*> arbitrary + +instance Arbitrary (InternalMeasure k v) where + arbitrary = InternalMeasure <$> arbitrary <*> arbitrary1 <*> arbitrary1 + +deriving newtype instance Arbitrary DS.Length +deriving newtype instance Arbitrary DS.SlotNoUB +deriving newtype instance Arbitrary DS.SlotNoLB + +instance Arbitrary1 StrictMaybe where + liftArbitrary arb = frequency [(1, return SNothing), (3, liftM SJust arb)] + + liftShrink shr (SJust x) = SNothing : [ SJust x' | x' <- shr x ] + liftShrink _ SNothing = [] diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool.hs index a583914def..f48ea34aff 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool.hs @@ -1,8 +1,8 @@ +{-# LANGUAGE DataKinds #-} {-# LANGUAGE FlexibleContexts #-} {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} {-# LANGUAGE NamedFieldPuns #-} -{-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE ParallelListComp #-} {-# LANGUAGE RankNTypes #-} {-# LANGUAGE RecordWildCards #-} @@ -32,40 +32,34 @@ -- module Test.Consensus.Mempool (tests) where -import Cardano.Binary (Encoding, toCBOR) +import Cardano.Binary (toCBOR) import Cardano.Crypto.Hash -import Control.Exception (assert) -import Control.Monad (foldM, forM, forM_, guard, void) -import Control.Monad.Except (Except, runExcept) +import Control.Monad (foldM, forM, forM_, void) +import Control.Monad.Except (runExcept) import Control.Monad.IOSim (runSimOrThrow) import Control.Monad.State (State, evalState, get, modify) import Control.Tracer (Tracer (..)) import Data.Bifunctor (first, second) import Data.Either (isRight) -import Data.List as List (foldl', isSuffixOf, nub, partition, sortOn) +import qualified Data.List as List +import qualified Data.List.NonEmpty as NE import Data.Map.Strict (Map) import qualified Data.Map.Strict as Map import Data.Maybe (mapMaybe) import Data.Semigroup (stimes) -import Data.Set (Set) import qualified Data.Set as Set -import Data.Word (Word32) -import GHC.Stack (HasCallStack) +import Data.Word import Ouroboros.Consensus.Block -import Ouroboros.Consensus.BlockchainTime -import Ouroboros.Consensus.Config.SecurityParam -import qualified Ouroboros.Consensus.HardFork.History as HardFork import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mempool import Ouroboros.Consensus.Mempool.TxSeq as TxSeq import Ouroboros.Consensus.Mock.Ledger hiding (TxId) -import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..)) -import Ouroboros.Consensus.Protocol.BFT -import Ouroboros.Consensus.Util (repeatedly, repeatedlyM, - safeMaximumOn, (.:)) +import Ouroboros.Consensus.Util (repeatedly, repeatedlyM) import Ouroboros.Consensus.Util.Condense (condense) import Ouroboros.Consensus.Util.IOLike +import Test.Consensus.Mempool.Util import Test.Crypto.Hash () import Test.QuickCheck hiding (elements) import Test.Tasty (TestTree, testGroup) @@ -76,34 +70,35 @@ import Test.Util.QuickCheck (elements) tests :: TestTree 
tests = testGroup "Mempool" [ testGroup "TxSeq" - [ testProperty "lookupByTicketNo complete" prop_TxSeq_lookupByTicketNo_complete - , testProperty "lookupByTicketNo sound" prop_TxSeq_lookupByTicketNo_sound - , testProperty "splitAfterTxSize" prop_TxSeq_splitAfterTxSize - , testProperty "splitAfterTxSizeSpec" prop_TxSeq_splitAfterTxSizeSpec + [ testProperty "lookupByTicketNo complete" prop_TxSeq_lookupByTicketNo_complete + , testProperty "lookupByTicketNo sound" prop_TxSeq_lookupByTicketNo_sound + , testProperty "splitAfterTxSize" prop_TxSeq_splitAfterTxSize + , testProperty "splitAfterTxSizeSpec" prop_TxSeq_splitAfterTxSizeSpec + ] + , testGroup "IOSim properties" + [ + testProperty "snapshotTxs == snapshotTxsAfter zeroTicketNo" prop_Mempool_snapshotTxs_snapshotTxsAfter + , testProperty "valid added txs == getTxs" prop_Mempool_addTxs_getTxs + , testProperty "addTxs [..] == forM [..] addTxs" prop_Mempool_semigroup_addTxs + , testProperty "result of addTxs" prop_Mempool_addTxs_result + , testProperty "Invalid transactions are never added" prop_Mempool_InvalidTxsNeverAdded + , testProperty "removeTxs" prop_Mempool_removeTxs + , testProperty "removeTxs [..] == forM [..] removeTxs" prop_Mempool_semigroup_removeTxs + , testProperty "result of getCapacity" prop_Mempool_getCapacity + -- FIXME: we should add an issue to test this aspect somehow. + -- , testProperty "Mempool capacity implementation" prop_Mempool_Capacity + , testProperty "Added valid transactions are traced" prop_Mempool_TraceValidTxs + , testProperty "Rejected invalid txs are traced" prop_Mempool_TraceRejectedTxs + , testProperty "Removed invalid txs are traced" prop_Mempool_TraceRemovedTxs + , testProperty "idx consistency" prop_Mempool_idx_consistency ] - , testProperty "snapshotTxs == snapshotTxsAfter zeroIdx" prop_Mempool_snapshotTxs_snapshotTxsAfter - , testProperty "valid added txs == getTxs" prop_Mempool_addTxs_getTxs - , testProperty "addTxs [..] == forM [..] addTxs" prop_Mempool_semigroup_addTxs - , testProperty "result of addTxs" prop_Mempool_addTxs_result - , testProperty "Invalid transactions are never added" prop_Mempool_InvalidTxsNeverAdded - , testProperty "result of getCapacity" prop_Mempool_getCapacity - -- , testProperty "Mempool capacity implementation" prop_Mempool_Capacity - -- FIXME: we should add an issue to test this aspect somehow. - , testProperty "Added valid transactions are traced" prop_Mempool_TraceValidTxs - , testProperty "Rejected invalid txs are traced" prop_Mempool_TraceRejectedTxs - , testProperty "Removed invalid txs are traced" prop_Mempool_TraceRemovedTxs - , testProperty "idx consistency" prop_Mempool_idx_consistency - , testProperty "removeTxs" prop_Mempool_removeTxs - , testProperty "removeTxs [..] == forM [..] removeTxs" prop_Mempool_semigroup_removeTxs ] -type TheMeasure = IgnoringOverflow ByteSize32 - {------------------------------------------------------------------------------- Mempool Implementation Properties -------------------------------------------------------------------------------} --- | Test that @snapshotTxs == snapshotTxsAfter zeroIdx@. +-- | Test that @snapshotTxs == snapshotTxsAfter zeroTicketNo@. 
prop_Mempool_snapshotTxs_snapshotTxsAfter :: TestSetup -> Property prop_Mempool_snapshotTxs_snapshotTxsAfter setup = withTestMempool setup $ \TestMempool { mempool } -> do @@ -119,7 +114,7 @@ prop_Mempool_addTxs_getTxs setup = _ <- addTxs mempool (allTxs setup) MempoolSnapshot { snapshotTxs } <- atomically $ getSnapshot mempool return $ counterexample (ppTxs (txs setup)) $ - validTxs setup `isSuffixOf` map (txForgetValidated . prjTx) snapshotTxs + validTxs setup `List.isSuffixOf` map (txForgetValidated . prjTx) snapshotTxs -- | Test that both adding the transactions one by one and adding them in one go -- produce the same result. @@ -180,7 +175,7 @@ prop_Mempool_removeTxs :: TestSetupWithTxInMempool -> Property prop_Mempool_removeTxs (TestSetupWithTxInMempool testSetup txToRemove) = withTestMempool testSetup $ \TestMempool { mempool } -> do let Mempool { removeTxs, getSnapshot } = mempool - removeTxs [txId txToRemove] + removeTxs $ NE.fromList [txId txToRemove] txsInMempoolAfter <- map prjTx . snapshotTxs <$> atomically getSnapshot return $ counterexample ("Transactions in the mempool after removing (" <> @@ -189,14 +184,14 @@ prop_Mempool_removeTxs (TestSetupWithTxInMempool testSetup txToRemove) = -- | Test that both removing transactions one by one and removing them in one go -- produce the same result. -prop_Mempool_semigroup_removeTxs :: TestSetupWithTxsInMempool -> Property -prop_Mempool_semigroup_removeTxs (TestSetupWithTxsInMempool testSetup txsToRemove) = +prop_Mempool_semigroup_removeTxs :: TestSetupWithTxsInMempoolToRemove -> Property +prop_Mempool_semigroup_removeTxs (TestSetupWithTxsInMempoolToRemove testSetup txsToRemove) = withTestMempool testSetup $ \TestMempool {mempool = mempool1} -> do - removeTxs mempool1 $ map txId txsToRemove + removeTxs mempool1 $ NE.map txId txsToRemove snapshot1 <- atomically (getSnapshot mempool1) return $ withTestMempool testSetup $ \TestMempool {mempool = mempool2} -> do - forM_ (map txId txsToRemove) (removeTxs mempool2 . (:[])) + forM_ (NE.map txId txsToRemove) (removeTxs mempool2 . (NE.:| [])) snapshot2 <- atomically (getSnapshot mempool2) return $ counterexample @@ -290,7 +285,7 @@ prop_Mempool_TraceRemovedTxs setup = return $ classify (not (null removedTxs)) "Removed some transactions" $ map (const (Right ())) errs === errs .&&. - sortOn fst expected === sortOn fst removedTxs + List.sortOn fst expected === List.sortOn fst removedTxs where cfg = testLedgerCfg setup @@ -298,7 +293,7 @@ prop_Mempool_TraceRemovedTxs setup = isRemoveTxsEvent (TraceMempoolRemoveTxs txs _) = Just (map (first txForgetValidated) txs) isRemoveTxsEvent _ = Nothing - expectedToBeRemoved :: LedgerState TestBlock -> [TestTx] -> [(TestTx, TestTxError)] + expectedToBeRemoved :: LedgerState TestBlock ValuesMK -> [TestTx] -> [(TestTx, TestTxError)] expectedToBeRemoved ledgerState txsInMempool = [ (tx, err) | (tx, Left err) <- fst $ validateTxs cfg ledgerState txsInMempool @@ -313,37 +308,9 @@ prjTx (a, _b, _c) = a TestSetup: how to set up a TestMempool -------------------------------------------------------------------------------} -type TestBlock = SimpleBftBlock SimpleMockCrypto BftMockCrypto - -type TestTx = GenTx TestBlock - -type TestTxId = TxId TestTx - -type TestTxError = ApplyTxErr TestBlock - --- There are 5 (core)nodes and each gets 1000. 
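+-- The block type and ledger helpers used by these tests ('TestBlock',
+-- 'testInitLedger', 'testLedgerConfigNoSizeLimits', 'genTxs', 'genValidTxs',
+-- 'applyTxToLedger') live in Test.Consensus.Mempool.Util (imported above),
+-- which is shared with the state-machine tests in
+-- Test.Consensus.Mempool.StateMachine.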
-testInitLedger :: LedgerState TestBlock -testInitLedger = genesisSimpleLedgerState $ mkAddrDist (NumCoreNodes 5) - --- | Test config --- --- (We don't really care about most of these values here) -mkTestLedgerConfig :: MockConfig -> LedgerConfig TestBlock -mkTestLedgerConfig mockCfg = SimpleLedgerConfig { - simpleMockLedgerConfig = () - , simpleLedgerEraParams = - HardFork.defaultEraParams - (SecurityParam 4) - (slotLengthFromSec 20) - , simpleLedgerMockConfig = mockCfg - } - -testLedgerConfigNoSizeLimits :: LedgerConfig TestBlock -testLedgerConfigNoSizeLimits = mkTestLedgerConfig defaultMockConfig - data TestSetup = TestSetup { testLedgerCfg :: LedgerConfig TestBlock - , testLedgerState :: LedgerState TestBlock + , testLedgerState :: LedgerState TestBlock ValuesMK , testInitialTxs :: [TestTx] -- ^ These are all valid and will be the initial contents of the Mempool. , testMempoolCapOverride :: MempoolCapacityBytesOverride @@ -369,7 +336,7 @@ ppTestTxWithHash x = condense -- -- The generated 'testMempoolCap' will be: -- > foldMap 'genTxSize' 'testInitialTxs' + extraCapacity -genTestSetupWithExtraCapacity :: Int -> ByteSize32 -> Gen (TestSetup, LedgerState TestBlock) +genTestSetupWithExtraCapacity :: Int -> ByteSize32 -> Gen (TestSetup, LedgerState TestBlock ValuesMK) genTestSetupWithExtraCapacity maxInitialTxs extraCapacity = do ledgerSize <- choose (0, maxInitialTxs) nbInitialTxs <- choose (0, maxInitialTxs) @@ -388,7 +355,7 @@ genTestSetupWithExtraCapacity maxInitialTxs extraCapacity = do -- | Generate a 'TestSetup' and return the ledger obtained by applying all of -- the initial transactions. Generates setups with a fixed -- 'MempoolCapacityBytesOverride', no 'NoMempoolCapacityBytesOverride'. -genTestSetup :: Int -> Gen (TestSetup, LedgerState TestBlock) +genTestSetup :: Int -> Gen (TestSetup, LedgerState TestBlock ValuesMK) genTestSetup maxInitialTxs = genTestSetupWithExtraCapacity maxInitialTxs (ByteSize32 0) @@ -441,51 +408,19 @@ instance Arbitrary TestSetup where , isRight $ txsAreValid testLedgerCfg testLedgerState testInitialTxs' ] --- | Generate a number of valid and invalid transactions and apply the valid --- transactions to the given 'LedgerState'. The transactions along with a --- 'Bool' indicating whether its valid ('True') or invalid ('False') and the --- resulting 'LedgerState' are returned. 
-genTxs :: Int -- ^ The number of transactions to generate - -> LedgerState TestBlock - -> Gen ([(TestTx, Bool)], LedgerState TestBlock) -genTxs = go [] - where - go txs n ledger - | n <= 0 = return (reverse txs, ledger) - | otherwise = do - valid <- arbitrary - if valid - then do - (validTx, ledger') <- genValidTx ledger - go ((validTx, True):txs) (n - 1) ledger' - else do - invalidTx <- genInvalidTx ledger - go ((invalidTx, False):txs) (n - 1) ledger - -mustBeValid :: HasCallStack - => Except TestTxError (LedgerState TestBlock) - -> LedgerState TestBlock -mustBeValid ex = case runExcept ex of - Left _ -> error "impossible" - Right ledger -> ledger - -txIsValid :: LedgerConfig TestBlock -> LedgerState TestBlock -> TestTx -> Bool -txIsValid cfg ledgerState tx = - isRight $ runExcept $ applyTxToLedger cfg ledgerState tx - txsAreValid :: LedgerConfig TestBlock - -> LedgerState TestBlock + -> LedgerState TestBlock ValuesMK -> [TestTx] - -> Either TestTxError (LedgerState TestBlock) + -> Either TestTxError (LedgerState TestBlock ValuesMK) txsAreValid cfg ledgerState txs = runExcept $ repeatedlyM (flip (applyTxToLedger cfg)) txs ledgerState validateTxs :: LedgerConfig TestBlock - -> LedgerState TestBlock + -> LedgerState TestBlock ValuesMK -> [TestTx] - -> ([(TestTx, Either TestTxError ())], LedgerState TestBlock) + -> ([(TestTx, Either TestTxError ())], LedgerState TestBlock ValuesMK) validateTxs cfg = go [] where go revalidated ledgerState = \case @@ -494,126 +429,6 @@ validateTxs cfg = go [] Left err -> go ((tx, Left err):revalidated) ledgerState txs' Right ledgerState' -> go ((tx, Right ()):revalidated) ledgerState' txs' --- | Generate a number of valid transactions and apply these to the given --- 'LedgerState'. The transactions and the resulting 'LedgerState' are --- returned. -genValidTxs :: Int -- ^ The number of valid transactions to generate - -> LedgerState TestBlock - -> Gen ([TestTx], LedgerState TestBlock) -genValidTxs = go [] - where - go txs n ledger - | n <= 0 = return (reverse txs, ledger) - | otherwise = do - (tx, ledger') <- genValidTx ledger - go (tx:txs) (n - 1) ledger' - --- | Generate a valid transaction (but ignoring any per-tx size limits, see Note --- [Transaction size limit]). -genValidTx :: LedgerState TestBlock -> Gen (TestTx, LedgerState TestBlock) -genValidTx ledgerState@(SimpleLedgerState MockState { mockUtxo = utxo }) = do - -- Never let someone go broke, otherwise we risk concentrating all the - -- wealth in one person. That would be problematic (for the society) but - -- also because we wouldn't be able to generate any valid transactions - -- anymore. - - let sender - | Just (richest, _) <- safeMaximumOn snd $ Map.toList $ - sum . map snd <$> peopleWithFunds - = richest - | otherwise - = error "no people with funds" - - recipient <- elements $ filter (/= sender) $ Map.keys peopleWithFunds - let assets = peopleWithFunds Map.! 
sender - fortune = sum (map snd assets) - ins = Set.fromList $ map fst assets - - -- At most spent half of someone's fortune - amount <- choose (1, fortune `div` 2) - let outRecipient = (recipient, amount) - outs - | amount == fortune - = [outRecipient] - | otherwise - = [outRecipient, (sender, fortune - amount)] - tx = mkSimpleGenTx $ Tx DoNotExpire ins outs - return (tx, mustBeValid (applyTxToLedger testLedgerConfigNoSizeLimits ledgerState tx)) - where - peopleWithFunds :: Map Addr [(TxIn, Amount)] - peopleWithFunds = Map.unionsWith (<>) - [ Map.singleton addr [(txIn, amount)] - | (txIn, (addr, amount)) <- Map.toList utxo - ] - -genInvalidTx :: LedgerState TestBlock -> Gen TestTx -genInvalidTx ledgerState@(SimpleLedgerState MockState { mockUtxo = utxo }) = do - let peopleWithFunds = nub $ map fst $ Map.elems utxo - sender <- elements peopleWithFunds - recipient <- elements $ filter (/= sender) peopleWithFunds - let assets = filter (\(_, (addr, _)) -> addr == sender) $ Map.toList utxo - ins = Set.fromList $ map fst assets - -- There is only 5 000 in 'testInitLedger', so any transaction spending - -- more than 5 000 is invalid. - amount <- choose (5_001, 10_000) - let outs = [(recipient, amount)] - tx = mkSimpleGenTx $ Tx DoNotExpire ins outs - return $ assert (not (txIsValid testLedgerConfigNoSizeLimits ledgerState tx)) tx - --- | Generate an invalid tx that is larger than the given measure. -genLargeInvalidTx :: TheMeasure -> Gen TestTx -genLargeInvalidTx (IgnoringOverflow sz) = go Set.empty - where - go ins = case isLargeTx ins of - Just tx -> pure tx - Nothing -> do - newTxIn <- arbitrary - go (Set.insert newTxIn ins) - - isLargeTx :: Set TxIn -> Maybe TestTx - isLargeTx ins = do - let outs = [] - tx = mkSimpleGenTx $ Tx DoNotExpire ins outs - guard $ genTxSize tx > sz - pure tx - --- | Apply a transaction to the ledger --- --- We don't have blocks in this test, but transactions only. In this function --- we pretend the transaction /is/ a block, apply it to the UTxO, and then --- update the tip of the ledger state, incrementing the slot number and faking --- a hash. -applyTxToLedger :: LedgerConfig TestBlock - -> LedgerState TestBlock - -> TestTx - -> Except TestTxError (LedgerState TestBlock) -applyTxToLedger cfg (SimpleLedgerState mockState) tx = - mkNewLedgerState <$> updateMockUTxO mockCfg dummy tx mockState - where - mockCfg = simpleLedgerMockConfig cfg - - -- All expiries in this test are 'DoNotExpire', so the current time is - -- irrelevant. - dummy :: SlotNo - dummy = 0 - - mkNewLedgerState mockState' = - SimpleLedgerState mockState' { mockTip = BlockPoint slot' hash' } - - slot' = case pointSlot $ mockTip mockState of - Origin -> 0 - NotOrigin s -> succ s - - -- A little trick to instantiate the phantom parameter of 'Hash' (and - -- 'HeaderHash') with 'TestBlock' while actually hashing the slot number: - -- use a custom serialiser to instantiate the phantom type parameter with - -- @Header TestBlock@, but actually encode the slot number instead. - hash' :: HeaderHash TestBlock - hash' = hashWithSerialiser fakeEncodeHeader (error "fake header") - - fakeEncodeHeader :: Header TestBlock -> Encoding - fakeEncodeHeader _ = toCBOR slot' - {------------------------------------------------------------------------------- TestSetupWithTxs -------------------------------------------------------------------------------} @@ -705,7 +520,10 @@ instance Arbitrary TestSetupWithTxs where shrinkList (const []) . 
map fst $ txs ] -revalidate :: TestSetup -> [TestTx] -> ([(TestTx, Either TestTxError ())], LedgerState TestBlock) +revalidate :: + TestSetup + -> [TestTx] + -> ([(TestTx, Either TestTxError ())], LedgerState TestBlock ValuesMK) revalidate TestSetup { testLedgerCfg, testLedgerState, testInitialTxs } = validateTxs testLedgerCfg initLedgerState where @@ -751,6 +569,30 @@ instance Arbitrary TestSetupWithTxsInMempool where -- TODO shrink +data TestSetupWithTxsInMempoolToRemove = + TestSetupWithTxsInMempoolToRemove TestSetup (NE.NonEmpty TestTx) + deriving (Show) + +instance Arbitrary TestSetupWithTxsInMempoolToRemove where + arbitrary = fmap convertToRemove + $ arbitrary `suchThat` thereIsAtLeastOneTx + + shrink = fmap convertToRemove + . filter thereIsAtLeastOneTx + . shrink + . revertToRemove + +thereIsAtLeastOneTx :: TestSetupWithTxsInMempool -> Bool +thereIsAtLeastOneTx (TestSetupWithTxsInMempool _ txs) = not $ null txs + +convertToRemove :: TestSetupWithTxsInMempool -> TestSetupWithTxsInMempoolToRemove +convertToRemove (TestSetupWithTxsInMempool ts txs) = + TestSetupWithTxsInMempoolToRemove ts (NE.fromList txs) + +revertToRemove :: TestSetupWithTxsInMempoolToRemove -> TestSetupWithTxsInMempool +revertToRemove (TestSetupWithTxsInMempoolToRemove ts txs) = + TestSetupWithTxsInMempool ts (NE.toList txs) + {------------------------------------------------------------------------------- TestMempool: a mempool with random contents -------------------------------------------------------------------------------} @@ -778,7 +620,7 @@ data TestMempool m = TestMempool , addTxsToLedger :: [TestTx] -> STM m [Either TestTxError ()] -- | Return the current ledger. - , getCurrentLedger :: STM m (LedgerState TestBlock) + , getCurrentLedger :: STM m (LedgerState TestBlock ValuesMK) } -- NOTE: at the end of the test, this function also checks whether the Mempool @@ -814,7 +656,14 @@ withTestMempool setup@TestSetup {..} prop = -- Set up the LedgerInterface varCurrentLedgerState <- uncheckedNewTVarM testLedgerState let ledgerInterface = LedgerInterface - { getCurrentLedgerState = readTVar varCurrentLedgerState + { getCurrentLedgerState = forgetLedgerTables <$> readTVar varCurrentLedgerState + , getLedgerTablesAtFor = \pt txs -> do + let keys = List.foldl' (<>) emptyLedgerTables + $ map getTransactionKeySets txs + st <- atomically $ readTVar varCurrentLedgerState + if castPoint (getTip st) == pt + then pure $ Just $ restrictValues' st keys + else pure Nothing } -- Set up the Tracer @@ -830,10 +679,11 @@ withTestMempool setup@TestSetup {..} prop = testMempoolCapOverride tracer result <- addTxs mempool testInitialTxs + -- the invalid transactions are reported in the same order they were -- added, so the first error is not the result of a cascade sequence_ - [ error $ "Invalid initial transaction: " <> condense invalidTx + [ error $ "Invalid initial transaction: " <> condense invalidTx <> " because of error " <> show _err | MempoolTxRejected invalidTx _err <- result ] @@ -855,7 +705,7 @@ withTestMempool setup@TestSetup {..} prop = return $ res .&&. validContents addTxToLedger :: forall m. IOLike m - => StrictTVar m (LedgerState TestBlock) + => StrictTVar m (LedgerState TestBlock ValuesMK) -> TestTx -> STM m (Either TestTxError ()) addTxToLedger varCurrentLedgerState tx = do @@ -867,7 +717,7 @@ withTestMempool setup@TestSetup {..} prop = return $ Right () addTxsToLedger :: forall m. 
IOLike m - => StrictTVar m (LedgerState TestBlock) + => StrictTVar m (LedgerState TestBlock ValuesMK) -> [TestTx] -> STM m [(Either TestTxError ())] addTxsToLedger varCurrentLedgerState txs = @@ -875,7 +725,7 @@ withTestMempool setup@TestSetup {..} prop = -- | Check whether the transactions in the 'MempoolSnapshot' are valid -- w.r.t. the current ledger state. - checkMempoolValidity :: LedgerState TestBlock + checkMempoolValidity :: LedgerState TestBlock ValuesMK -> MempoolSnapshot TestBlock -> Property checkMempoolValidity ledgerState @@ -884,13 +734,20 @@ withTestMempool setup@TestSetup {..} prop = , snapshotSlotNo } = case runExcept $ repeatedlyM - (fmap fst .: applyTx testLedgerCfg DoNotIntervene snapshotSlotNo) - txs + applyTx' + [ txForgetValidated tx | (tx, _, _) <- snapshotTxs ] (TickedSimpleLedgerState ledgerState) of Right _ -> property True Left e -> counterexample (mkErrMsg e) $ property False where - txs = map (txForgetValidated . prjTx) snapshotTxs + applyTx' tx st = do + st' <- applyTx testLedgerCfg + DoNotIntervene + snapshotSlotNo + tx + st + pure $ applyDiffs st (fst st') + mkErrMsg e = "At the end of the test, the Mempool contents were invalid: " <> show e @@ -1186,16 +1043,17 @@ executeAction testMempool action = case action of False RemoveTxs txs -> do - removeTxs mempool (map txId txs) + let txs' = NE.fromList $ map txId txs + removeTxs mempool txs' tracedManuallyRemovedTxs <- expectTraceEvent $ \case TraceMempoolManuallyRemovedTxs txIds _ _ -> Just txIds _ -> Nothing - return $ if concat tracedManuallyRemovedTxs == map txId txs + return $ if concatMap NE.toList tracedManuallyRemovedTxs == map txId txs then property True else counterexample ("Expected a TraceMempoolManuallyRemovedTxs event for " <> condense txs <> " but got " <> - condense tracedManuallyRemovedTxs) + condense (map NE.toList tracedManuallyRemovedTxs)) False where @@ -1231,7 +1089,7 @@ genActions genNbToAdd = go testInitLedger mempty mempty where cfg = testLedgerConfigNoSizeLimits - go :: LedgerState TestBlock + go :: LedgerState TestBlock ValuesMK -- ^ Current ledger state with the contents of the Mempool applied -> [TestTx] -- ^ Transactions currently in the Mempool -> [Action] -- ^ Already generated actions @@ -1246,7 +1104,7 @@ genActions genNbToAdd = go testInitLedger mempty mempty -- transactions to remove -> do tx <- elements txs - let ((vTxs, iTxs), ledger') = first (partition (isRight . snd)) $ + let ((vTxs, iTxs), ledger') = first (List.partition (isRight . 
snd)) $ validateTxs cfg testInitLedger (filter (/= tx) txs) txs' = map fst vTxs removedTxs = tx : map fst iTxs diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness.hs index 16a10acf67..b49f7f16fb 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness.hs @@ -19,11 +19,13 @@ import Control.Monad (forever, void) import qualified Control.Tracer as Tracer import Data.Foldable (asum) import qualified Data.List as List +import Data.List.NonEmpty hiding (length) import Data.Void (Void, vacuous) import Ouroboros.Consensus.Config.SecurityParam as Consensus import qualified Ouroboros.Consensus.HardFork.History as HardFork import Ouroboros.Consensus.Ledger.SupportsMempool (ByteSize32 (..)) import qualified Ouroboros.Consensus.Ledger.SupportsMempool as Mempool +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Mempool (Mempool) import qualified Ouroboros.Consensus.Mempool as Mempool import qualified Ouroboros.Consensus.Mempool.Capacity as Mempool @@ -81,9 +83,13 @@ testTxSizeFairness TestParams { mempoolMaxCapacity, smallTxSize, largeTxSize, nr -- Obtain a mempool. ---------------------------------------------------------------------------- let + ledgerItf :: Mempool.LedgerInterface IO TestBlock ledgerItf = Mempool.LedgerInterface { - Mempool.getCurrentLedgerState = pure $ testInitLedgerWithState () - } + Mempool.getCurrentLedgerState = pure $ + testInitLedgerWithState NoPayLoadDependentState + , Mempool.getLedgerTablesAtFor = \_ _ -> pure $ + Just emptyLedgerTables + } eraParams = HardFork.defaultEraParams (Consensus.SecurityParam 10) (Time.slotLengthFromSec 2) @@ -201,7 +207,7 @@ remover mempool total = do -- transactions. threadDelay 1000 gtx <- atomically $ getATxFromTheMempool - Mempool.removeTxs mempool [Mempool.txId gtx] + Mempool.removeTxs mempool (Mempool.txId gtx :| []) loop (unGenTx gtx:txs) (n-1) where getATxFromTheMempool = diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness/TestBlock.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness/TestBlock.hs index 82131b3479..1669b17ada 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness/TestBlock.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Fairness/TestBlock.hs @@ -8,18 +8,24 @@ module Test.Consensus.Mempool.Fairness.TestBlock ( TestBlock + , TestBlock.PayloadDependentState (..) 
, Tx , mkGenTx , txSize , unGenTx ) where +import Codec.Serialise import Control.DeepSeq (NFData) +import Data.Void (Void) import GHC.Generics (Generic) import NoThunks.Class (NoThunks) import qualified Ouroboros.Consensus.Block as Block +import Ouroboros.Consensus.Ledger.Abstract (convertMapKind, + trivialLedgerTables) import qualified Ouroboros.Consensus.Ledger.Abstract as Ledger import qualified Ouroboros.Consensus.Ledger.SupportsMempool as Ledger +import Ouroboros.Consensus.Ticked (Ticked1) import qualified Test.Util.TestBlock as TestBlock import Test.Util.TestBlock (TestBlockWith) @@ -43,11 +49,15 @@ data Tx = Tx { txNumber :: Int, txSize :: Ledger.ByteSize32 } -------------------------------------------------------------------------------} instance TestBlock.PayloadSemantics Tx where - type PayloadDependentState Tx = () + data instance PayloadDependentState Tx mk = NoPayLoadDependentState + deriving (Show, Eq, Ord, Generic, NoThunks) + deriving anyclass Serialise type PayloadDependentError Tx = () - applyPayload st _tx = Right st + applyPayload NoPayLoadDependentState _tx = Right NoPayLoadDependentState + + getPayloadKeySets = const trivialLedgerTables data instance Block.CodecConfig TestBlock = TestBlockCodecConfig @@ -82,12 +92,24 @@ mkGenTx :: Int -> Ledger.ByteSize32 -> Ledger.GenTx TestBlock mkGenTx anId aSize = TestBlockGenTx $ Tx { txNumber = anId, txSize = aSize } instance Ledger.LedgerSupportsMempool TestBlock where - applyTx _cfg _shouldIntervene _slot gtx st = pure (st, ValidatedGenTx gtx) - - reapplyTx _cfg _slot _gtx gst = pure gst + applyTx _cfg _shouldIntervene _slot gtx st = pure ( + TestBlock.TickedTestLedger + $ convertMapKind + $ TestBlock.getTickedTestLedger + st + , ValidatedGenTx gtx + ) + + reapplyTx _cfg _slot _gtx gst = pure + $ TestBlock.TickedTestLedger + $ convertMapKind + $ TestBlock.getTickedTestLedger + gst txForgetValidated (ValidatedGenTx tx) = tx + getTransactionKeySets _ = trivialLedgerTables + instance Ledger.TxLimits TestBlock where type TxMeasure TestBlock = Ledger.IgnoringOverflow Ledger.ByteSize32 @@ -99,7 +121,21 @@ instance Ledger.TxLimits TestBlock where txMeasure _cfg _st = pure . Ledger.IgnoringOverflow . txSize . 
unGenTx {------------------------------------------------------------------------------- - Ledger support + Ledger support (empty tables) -------------------------------------------------------------------------------} type instance Ledger.ApplyTxErr TestBlock = () + +type instance Ledger.Key (Ledger.LedgerState TestBlock) = Void +type instance Ledger.Value (Ledger.LedgerState TestBlock) = Void + +instance Ledger.HasLedgerTables (Ledger.LedgerState TestBlock) +instance Ledger.HasLedgerTables (Ticked1 (Ledger.LedgerState TestBlock)) +instance Ledger.LedgerTablesAreTrivial (Ledger.LedgerState TestBlock) where + convertMapKind (TestBlock.TestLedger x NoPayLoadDependentState) = + TestBlock.TestLedger x NoPayLoadDependentState +instance Ledger.LedgerTablesAreTrivial (Ticked1 (Ledger.LedgerState TestBlock)) where + convertMapKind (TestBlock.TickedTestLedger x) = + TestBlock.TickedTestLedger (Ledger.convertMapKind x) +instance Ledger.CanStowLedgerTables (Ledger.LedgerState TestBlock) +instance Ledger.CanSerializeLedgerTables (Ledger.LedgerState TestBlock) diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/StateMachine.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/StateMachine.hs new file mode 100644 index 0000000000..57af8e8b20 --- /dev/null +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/StateMachine.hs @@ -0,0 +1,943 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TupleSections #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeOperators #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +#if __GLASGOW_HASKELL__ >= 910 +{-# OPTIONS_GHC -Wno-x-partial #-} +#endif + +-- | See 'MakeAtomic'. 
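+--
+-- Roughly, the test drives the mempool with a small command language
+-- ('TryAddTxs', 'SyncLedger', 'GetSnapshot', plus the external 'ChangeLedger'
+-- event) against both a reference 'Model' and the real mempool backed by a
+-- 'MockedLedgerDB', comparing the snapshots they produce (see 'MakeAtomic'
+-- below for how the sequential and parallel variants of the test differ).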
+module Test.Consensus.Mempool.StateMachine (tests) where + +import Cardano.Slotting.Slot +import Control.Arrow (second) +import Control.Concurrent.Class.MonadSTM.Strict.TChan +import Control.Monad (void) +import Control.Monad.Except (runExcept) +import qualified Control.Tracer as CT (Tracer (..), traceWith) +import Data.Bool (bool) +import Data.Foldable hiding (toList) +import Data.Function (on) +import qualified Data.Map.Strict as Map +import Data.Maybe (fromMaybe) +import qualified Data.Measure as Measure +import Data.Proxy +import Data.Set (Set) +import qualified Data.Set as Set +import Data.TreeDiff +import qualified Data.TreeDiff.OMap as TD +import GHC.Generics +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.HeaderValidation +import Ouroboros.Consensus.Ledger.Basics +import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.SupportsProtocol + (LedgerSupportsProtocol) +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Mempool +import Ouroboros.Consensus.Mempool.Impl.Common (tickLedgerState) +import Ouroboros.Consensus.Mempool.TxSeq +import Ouroboros.Consensus.Mock.Ledger.Address +import Ouroboros.Consensus.Mock.Ledger.Block +import Ouroboros.Consensus.Mock.Ledger.State +import Ouroboros.Consensus.Mock.Ledger.UTxO (Expiry, Tx, TxIn, TxOut) +import qualified Ouroboros.Consensus.Mock.Ledger.UTxO as Mock +import Ouroboros.Consensus.Util +import Ouroboros.Consensus.Util.Condense (condense) +import Ouroboros.Consensus.Util.IOLike hiding (bracket) +import Test.Cardano.Ledger.TreeDiff () +import Test.Consensus.Mempool.Util (TestBlock, applyTxToLedger, + genTxs, genValidTxs, testInitLedger, + testLedgerConfigNoSizeLimits) +import Test.QuickCheck +import Test.QuickCheck.Monadic +import Test.StateMachine hiding ((:>)) +import Test.StateMachine.DotDrawing +import qualified Test.StateMachine.Types as QC +import Test.StateMachine.Types (History (..), HistoryEvent (..)) +import qualified Test.StateMachine.Types.Rank2 as Rank2 +import Test.Tasty +import Test.Tasty.QuickCheck +import Test.Util.Orphans.ToExpr () +import Test.Util.ToExpr () + +{------------------------------------------------------------------------------- + Datatypes +-------------------------------------------------------------------------------} + +-- | Whether the LedgerDB should be wiped out +data ModifyDB = KeepDB | ClearDB deriving (Generic, ToExpr, NoThunks) + +instance Arbitrary ModifyDB where + arbitrary = bool KeepDB ClearDB <$> arbitrary + +keepsDB :: ModifyDB -> Bool +keepsDB KeepDB = True +keepsDB ClearDB = False + +-- | The model +data Model blk r = Model { + -- | The current tip on the mempool + modelMempoolIntermediateState :: !(TickedLedgerState blk ValuesMK) + + -- | The current list of transactions + , modelTxs :: ![(GenTx blk, TicketNo)] + + -- | The current size of the mempool + , modelCurrentSize :: !(TxMeasure blk) + + , modelCapacity :: !(TxMeasure blk) + + -- | Last seen ticket number + -- + -- This indicates how many transactions have ever been added to the mempool. + , modelLastSeenTicketNo :: !TicketNo + + , modelConfig :: !(LedgerCfg (LedgerState blk)) + + -- * LedgerDB + + -- | The current tip on the ledgerdb + , modelLedgerDBTip :: !(LedgerState blk ValuesMK) + + -- | The old states which are still on the LedgerDB. These should + -- technically be ancestors of the tip, but for the mempool we don't care. + , modelReachableStates :: !(Set (LedgerState blk ValuesMK)) + + -- | States which were previously on the LedgerDB. 
We keep these so that + -- 'ChangeLedger' does not generate a different state with the same hash. + , modelOtherStates :: !(Set (LedgerState blk ValuesMK)) + + } + +-- | The commands used by QSM +-- +-- We divide them in 'Action' which are the ones that we on purpose perform on +-- the mempool, and 'Event's which happen by external triggers. This is a mere +-- convenience, in the eyes of QSM they are the same thing. +data Command blk r = + Action !(Action blk r) + | Event !(Event blk r) + deriving (Generic1) + deriving (Rank2.Functor, Rank2.Foldable, Rank2.Traversable) + +-- | Actions on the mempool +data Action blk r = + -- | Add some transactions to the mempool + TryAddTxs ![GenTx blk] + | -- | Unconditionally sync with the ledger db + SyncLedger + | -- | Ask for the current snapshot + GetSnapshot + -- TODO: maybe add 'GetSnapshotFor (Point blk)', but this requires to keep + -- track of some more states to make it meaningful. + deriving (Generic1) + deriving (Rank2.Functor, Rank2.Foldable, Rank2.Traversable, CommandNames) + +-- | Events external to the mempool +data Event blk r = ChangeLedger + !(LedgerState blk ValuesMK) + !ModifyDB + deriving (Generic1) + deriving (Rank2.Functor, Rank2.Foldable, Rank2.Traversable, CommandNames) + +instance CommandNames (Command blk) where + cmdName (Action action) = cmdName action + cmdName (Event event) = cmdName event + + cmdNames :: forall r. Proxy (Command blk r) -> [String] + cmdNames _ = cmdNames (Proxy @(Action blk r)) + ++ cmdNames (Proxy @(Event blk r)) + +-- | Wether or not this test must be atomic. +-- +-- The reason behind this data type is that 'TryAddTxs' is on its nature prone +-- to race-conditions. And that is OK with us. For example take the following +-- sequence of commands: +-- +-- @@@ +-- TryAddTxs [Tx1, Tx2] || GetSnapshot +-- @@@ +-- +-- If we happen to hit the following interleaving: +-- +-- @@@ +-- AddTx Tx1; GetSnapshot; AddTx Tx2 +-- @@@ +-- +-- the model will never be able to reproduce the result of the snapshot. +-- +-- So in order to do a meaningful testing, what we do is: +-- +-- 1. Run a sequential test of actions ensuring that the responses of the model +-- and SUT match on 'GetSnaphsot'. This provides us with assurance that the +-- model works as expected on single-threaded/sequential scenarios. +-- +-- 2. Run a parallel test where 'TryAddTxs' is unitary (i.e. use the 'Atomic' +-- modifier) ensuring that the responses of the model and SUT match on +-- 'GetSnaphsot'. This ensures that there are no race conditions on this +-- case, or rephrased, that the operations on the mempool remain atomic even +-- if executed on separate threads. +-- +-- 3. Run a parallel test where 'TryAddTxs' is not unitary (using the +-- 'NonAtomic' modifier) and **NOT** checking the responses of the model +-- versus the SUT. This ensures that there are no deadlocks and no +-- errors/exceptions thrown when running in parallel. +-- +-- We believe that these test cover all the interesting cases and provide enough +-- assurance on the implementation of the Mempool. +data MakeAtomic = Atomic | NonAtomic | DontCare + +generator :: + ( Arbitrary (LedgerState blk ValuesMK) + , UnTick blk + , StandardHash blk + , GetTip (LedgerState blk) + ) + => MakeAtomic + -> (Int -> LedgerState blk ValuesMK -> Gen [GenTx blk]) + -- ^ Transaction generator based on an state + -> Model blk Symbolic + -> Maybe (Gen (Command blk Symbolic)) +generator ma gTxs model = + Just $ + frequency + [(100, + Action . TryAddTxs <$> case ma of + Atomic -> do + gTxs 1 . 
unTick $ modelMempoolIntermediateState + _ -> do + n <- getPositive <$> arbitrary + gTxs n . unTick $ modelMempoolIntermediateState + ) + , (10, pure $ Action SyncLedger) + , (10, do + ls <- oneof ([ arbitrary `suchThat` ( not + . flip elem (getTip modelLedgerDBTip + `Set.insert` Set.map getTip (modelOtherStates + `Set.union` modelReachableStates)) + . getTip) + ] ++ (if Set.null modelReachableStates then [] else [elements (Set.toList modelReachableStates)]) + ++ (if Set.null modelOtherStates then [] else [elements (Set.toList modelOtherStates)]) + ) + `suchThat` (not . (== (getTip modelLedgerDBTip)) . getTip) + Event . ChangeLedger ls <$> arbitrary) + , (10, pure $ Action GetSnapshot) + ] + where + Model{ + modelMempoolIntermediateState + , modelLedgerDBTip + , modelReachableStates + , modelOtherStates + } = model + +data Response blk r = + -- | Nothing to tell + Void + | -- | Return the contents of a snapshot + GotSnapshot ![(GenTx blk, TicketNo)] + deriving (Generic1) + deriving (Rank2.Functor, Rank2.Foldable, Rank2.Traversable) + +{------------------------------------------------------------------------------- + Model side +-------------------------------------------------------------------------------} + +initModel :: + ( LedgerSupportsMempool blk + , ValidateEnvelope blk + ) + => LedgerConfig blk + -> TxMeasure blk + -> LedgerState blk ValuesMK + -> Model blk r +initModel cfg capacity initialState = + Model { + modelMempoolIntermediateState = ticked + , modelReachableStates = Set.empty + , modelLedgerDBTip = initialState + , modelTxs = [] + , modelCurrentSize = Measure.zero + , modelLastSeenTicketNo = zeroTicketNo + , modelCapacity = capacity + , modelConfig = cfg + , modelOtherStates = Set.empty + } + where ticked = tick cfg initialState + +mock :: + Model blk Symbolic + -> Command blk Symbolic + -> GenSym (Response blk Symbolic) +mock model = \case + Action (TryAddTxs _) -> pure Void + Action SyncLedger -> pure Void + Action GetSnapshot -> pure $ GotSnapshot $ modelTxs model + Event (ChangeLedger _ _) -> pure Void + +{------------------------------------------------------------------------------- + Transitions +-------------------------------------------------------------------------------} + +doSync :: + ( ValidateEnvelope blk + , LedgerSupportsMempool blk + , Eq (TickedLedgerState blk ValuesMK) + ) + => Model blk r + -> Model blk r +doSync model = + if st == st' + then model + else + let + (validTxs, _tk, newSize, st'') = + foldTxs modelConfig zeroTicketNo modelCapacity Measure.zero st' $ map (second Just) modelTxs + + in + model { + modelMempoolIntermediateState = st'' + , modelTxs = validTxs + , modelCurrentSize = newSize + } + where + + st' = tick modelConfig modelLedgerDBTip + + Model { + modelMempoolIntermediateState = st + , modelLedgerDBTip + , modelTxs + , modelCapacity + , modelConfig + } = model + +doChangeLedger :: + (StandardHash blk, GetTip (LedgerState blk)) + => Model blk r + -> LedgerState blk ValuesMK + -> ModifyDB + -> Model blk r +doChangeLedger model l' b' = + model { modelLedgerDBTip = l' + , modelReachableStates = + if keepsDB b' + then l' `Set.delete` Set.insert modelLedgerDBTip modelReachableStates + else Set.empty + , modelOtherStates = + if keepsDB b' + then modelOtherStates + else modelLedgerDBTip `Set.insert` (modelOtherStates `Set.union` modelReachableStates) + } + where + Model { + modelLedgerDBTip + , modelReachableStates + , modelOtherStates + } = model + +doTryAddTxs :: + ( LedgerSupportsMempool blk + , ValidateEnvelope blk + , Eq 
(TickedLedgerState blk ValuesMK) + , Eq (GenTx blk) + ) + => Model blk r + -> [GenTx blk] + -> Model blk r +doTryAddTxs model [] = model +doTryAddTxs model txs = + case find ((castPoint (getTip st) ==) . getTip) + (Set.insert modelLedgerDBTip modelReachableStates) of + Nothing -> doTryAddTxs (doSync model) txs + Just _ -> + let nextTicket = succ $ modelLastSeenTicketNo model + (validTxs, tk, newSize, st'') = + foldTxs cfg nextTicket modelCapacity modelCurrentSize st $ map (,Nothing) txs + modelTxs' = modelTxs ++ validTxs + in + model { + modelMempoolIntermediateState = st'' + , modelTxs = modelTxs' + , modelLastSeenTicketNo = pred tk + , modelCurrentSize = newSize + } + where + Model { + modelMempoolIntermediateState = st + , modelTxs + , modelCurrentSize + , modelReachableStates + , modelLedgerDBTip + , modelConfig = cfg + , modelCapacity + } = model + +transition :: + ( Eq (GenTx blk) + , Eq (TickedLedgerState blk ValuesMK) + , LedgerSupportsMempool blk + , ToExpr (GenTx blk) + , ValidateEnvelope blk + , ToExpr (Command blk r) + ) + => Model blk r + -> Command blk r + -> Response blk r + -> Model blk r +transition model cmd resp = case (cmd, resp) of + (Action (TryAddTxs txs), Void) -> doTryAddTxs model txs + (Event (ChangeLedger l b), Void) -> doChangeLedger model l b + (Action GetSnapshot, GotSnapshot{}) -> model + (Action SyncLedger, Void) -> doSync model + _ -> error $ "mismatched command " + <> show cmd + <> " and response " + <> show resp + +{------------------------------------------------------------------------------- + Ledger helper functions +-------------------------------------------------------------------------------} + +-- | Apply a list of transactions short-circuiting if the mempool gets full. +-- Emulates almost exactly the behaviour of 'implTryTryAddTxs'. +foldTxs :: + forall blk. + ( LedgerSupportsMempool blk + , BasicEnvelopeValidation blk + ) + => LedgerConfig blk + -> TicketNo + -> TxMeasure blk + -> TxMeasure blk + -> TickedLedgerState blk ValuesMK + -> [(GenTx blk, Maybe TicketNo)] + -> ( [(GenTx blk, TicketNo)] + , TicketNo + , TxMeasure blk + , TickedLedgerState blk ValuesMK + ) +foldTxs cfg nextTk capacity initialFilled initialState = + go ([], nextTk, initialFilled, initialState) + where + go (acc, tk, curSize, st) [] = ( reverse acc + , tk + , curSize + , st + ) + go (acc, tk, curSize, st) ((tx, txtk):next) = + let slot = case getTipSlot st of + Origin -> minimumPossibleSlotNo (Proxy @blk) + At v -> v + 1 + in + case runExcept $ (,) <$> txMeasure cfg st tx <*> applyTx cfg DoNotIntervene slot tx st of + Left{} -> + go ( acc + , tk + , curSize + , st + ) + next + Right (txsz, (st', vtx)) + | (curSize Measure.<= curSize `Measure.plus` txsz + -- Overflow + && curSize `Measure.plus` txsz Measure.<= capacity + ) + + -- fits + -> + go ( (txForgetValidated vtx, fromMaybe tk txtk):acc + , succ tk + , curSize `Measure.plus` txsz + , applyDiffs st st' + ) + next + | otherwise -> + go ( acc + , tk + , curSize + , st + ) + next + +tick :: + ( ValidateEnvelope blk + , LedgerSupportsMempool blk + ) + => LedgerConfig blk + -> LedgerState blk ValuesMK + -> TickedLedgerState blk ValuesMK +tick cfg st = applyDiffs st ticked + where + ticked = snd + . tickLedgerState cfg + . ForgeInUnknownSlot + . 
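+    -- For intuition, a sketch of how 'foldTxs' behaves when the mempool fills
+    -- up (hypothetical unit-sized transactions, a capacity of 2 units, and no
+    -- invalid transactions):
+    --
+    -- > foldTxs cfg tk 2 0 st [(tx1, Nothing), (tx2, Nothing), (tx3, Nothing)]
+    -- >   == ([(tx1, tk), (tx2, succ tk)], succ (succ tk), 2, st'')
+    --
+    -- tx3 does not fit and is skipped, and the returned ticket number is the
+    -- next one that would be assigned.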
forgetLedgerTables + $ st + +{------------------------------------------------------------------------------- + SUT side +-------------------------------------------------------------------------------} + +-- | The System Under Test +data SUT m blk = + SUT + !(Mempool m blk) + -- ^ A Mempool + !(StrictTVar m (MockedLedgerDB blk)) + -- ^ Emulates a ledger db to the extent needed by the ledger interface. + deriving Generic + +deriving instance ( NoThunks (Mempool m blk) + , NoThunks (StrictTVar m (MockedLedgerDB blk)) + ) => NoThunks (SUT m blk) + +-- | A very minimal mock of the ledger db. +-- +-- The ledger interface will serve the values from this datatype. +data MockedLedgerDB blk = MockedLedgerDB { + -- | The current LedgerDB tip + ldbTip :: !(LedgerState blk ValuesMK) + -- | States which are still reachable in the LedgerDB + , reachableTips :: !(Set (LedgerState blk ValuesMK)) + -- | States which are no longer reachable in the LedgerDB + , otherStates :: !(Set (LedgerState blk ValuesMK)) + } deriving (Generic) + +-- | Create a ledger interface and provide the tvar to modify it when switching +-- ledgers. +newLedgerInterface :: + ( MonadSTM m + , NoThunks (MockedLedgerDB blk) + , LedgerSupportsMempool blk + ) + => LedgerState blk ValuesMK + -> m (LedgerInterface m blk, StrictTVar m (MockedLedgerDB blk)) +newLedgerInterface initialLedger = do + t <- newTVarIO $ MockedLedgerDB initialLedger Set.empty Set.empty + pure (LedgerInterface { + getCurrentLedgerState = forgetLedgerTables . ldbTip <$> readTVar t + , getLedgerTablesAtFor = \pt txs -> do + let keys = foldl' (<>) emptyLedgerTables + $ map getTransactionKeySets txs + MockedLedgerDB ti oldReachableTips _ <- atomically $ readTVar t + if pt == castPoint (getTip ti) -- if asking for tables at the tip of the + -- ledger db + then + let tbs = ltliftA2 f keys $ projectLedgerTables ti + in pure $ Just tbs + else case find ((castPoint pt ==). getTip) oldReachableTips of + Nothing -> pure Nothing + Just mtip -> + if pt == castPoint (getTip mtip) + -- if asking for tables at some still reachable state + then + let tbs = ltliftA2 f keys $ projectLedgerTables mtip + in pure $ Just tbs + else + -- if asking for tables at other point or at the mempool tip but + -- it is not reachable + pure Nothing + }, t) + where + f :: Ord k => KeysMK k v -> ValuesMK k v -> ValuesMK k v + f (KeysMK s) (ValuesMK v) = + ValuesMK (Map.restrictKeys v s) + +-- | Make a SUT +mkSUT :: + forall m blk. ( NoThunks (MockedLedgerDB blk) + , IOLike m + , LedgerSupportsProtocol blk + , LedgerSupportsMempool blk + , HasTxId (GenTx blk) + ) + => LedgerConfig blk + -> LedgerState blk ValuesMK + -> m (SUT m blk, CT.Tracer m String) +mkSUT cfg initialLedger = do + (lif, t) <- newLedgerInterface initialLedger + trcrChan <- atomically newTChan :: m (StrictTChan m (Either String (TraceEventMempool blk))) + let trcr = CT.Tracer $ -- Dbg.traceShowM @(Either String (TraceEventMempool blk)) + atomically . writeTChan trcrChan + mempool <- openMempoolWithoutSyncThread + lif + cfg + (MempoolCapacityBytesOverride $ unIgnoringOverflow txMaxBytes') + (CT.Tracer $ CT.traceWith trcr . Right) + pure (SUT mempool t, CT.Tracer $ atomically . writeTChan trcrChan . 
Left) + +semantics :: + ( MonadSTM m + , LedgerSupportsMempool blk +#if __GLASGOW_HASKELL__ > 810 + , ValidateEnvelope blk +#endif + ) => + CT.Tracer m String + -> Command blk Concrete + -> StrictTVar m (SUT m blk) + -> m (Response blk Concrete) +semantics trcr cmd r = do + SUT m t <- atomically $ readTVar r + case cmd of + Action (TryAddTxs txs) -> do + + mapM_ (addTx m AddTxForRemotePeer) txs + pure Void + + Action SyncLedger -> do + void $ syncWithLedger m + pure Void + + Action GetSnapshot -> do + txs <- snapshotTxs <$> atomically (getSnapshot m) + pure $ GotSnapshot [ (txForgetValidated vtx, tk) | (vtx, tk, _) <- txs ] + + Event (ChangeLedger l' newReachable) -> do + CT.traceWith trcr $ "ChangingLedger to " <> show (getTip l') + atomically $ do + MockedLedgerDB ledgerTip oldReachableTips oldUnreachableTips <- readTVar t + if getTip l' == getTip ledgerTip + then if keepsDB newReachable + then pure () + else + let (newReachableTips, newUnreachableTips) = (Set.empty, + Set.insert ledgerTip + $ Set.union oldUnreachableTips oldReachableTips + ) + in writeTVar t (MockedLedgerDB l' newReachableTips newUnreachableTips) + else + let + (newReachableTips, newUnreachableTips) = + if keepsDB newReachable + then (Set.insert ledgerTip oldReachableTips, oldUnreachableTips) + else (Set.empty, + Set.insert ledgerTip + $ Set.union oldUnreachableTips oldReachableTips + ) + in + writeTVar t (MockedLedgerDB l' newReachableTips newUnreachableTips) + pure Void + +{------------------------------------------------------------------------------- + Conditions +-------------------------------------------------------------------------------} + +precondition :: Model blk Symbolic -> Command blk Symbolic -> Logic +-- precondition cfg Model {modelCurrentSize} (Action (TryAddTxs txs)) = +-- Boolean $ not (null txs) && modelCurrentSize > 0 && sum (map tSize rights $ init txs) < modelCurrentSize +precondition _ _ = Top + +postcondition :: + ( LedgerSupportsMempool blk + , Eq (GenTx blk) +-- , Show (TickedLedgerState blk ValuesMK) + ) + => Model blk Concrete + -> Command blk Concrete + -> Response blk Concrete + -> Logic +postcondition model (Action GetSnapshot) (GotSnapshot txs) = + -- Annotate (show $ modelMempoolIntermediateState model) $ + modelTxs model .== txs +postcondition _ _ _ = Top + +noPostcondition :: + Model blk Concrete + -> Command blk Concrete + -> Response blk Concrete + -> Logic +noPostcondition _ _ _ = Top + +shrinker :: Model blk Symbolic + -> Command blk Symbolic + -> [Command blk Symbolic] +shrinker _ (Action (TryAddTxs txs)) = + Action . 
TryAddTxs <$> shrinkList shrinkNothing txs +shrinker _ _ = [] + +{------------------------------------------------------------------------------- + State Machine +-------------------------------------------------------------------------------} + +sm :: + ( LedgerSupportsMempool blk + , IOLike m +#if __GLASGOW_HASKELL__ > 810 + , ValidateEnvelope blk +#endif + ) + => StateMachine (Model blk) (Command blk) m (Response blk) + -> CT.Tracer m String + -> StrictTVar m (SUT m blk) + -> StateMachine (Model blk) (Command blk) m (Response blk) +sm sm0 trcr ior = sm0 { + QC.semantics = \c -> semantics trcr c ior + } + +smUnused :: + ( blk ~ TestBlock + , LedgerSupportsMempool blk + , LedgerSupportsProtocol blk + , Monad m + ) + => LedgerConfig blk + -> LedgerState blk ValuesMK + -> TxMeasure blk + -> MakeAtomic + -> (Int -> LedgerState blk ValuesMK -> Gen [GenTx blk]) + -> StateMachine (Model blk) (Command blk) m (Response blk) +smUnused cfg initialState capacity ma gTxs = + StateMachine { + QC.initModel = initModel cfg capacity initialState + , QC.transition = transition + , QC.precondition = precondition + , QC.postcondition = + case ma of + NonAtomic -> noPostcondition + Atomic -> postcondition + DontCare -> postcondition + , QC.invariant = Nothing + , QC.generator = generator ma gTxs + , QC.shrinker = shrinker + , QC.semantics = undefined + , QC.mock = mock + , QC.cleanup = noCleanup + } + +{------------------------------------------------------------------------------- + Properties +-------------------------------------------------------------------------------} + +prop_mempoolSequential :: + forall blk . + ( HasTxId (GenTx blk) + , blk ~ TestBlock + , LedgerSupportsMempool blk +#if __GLASGOW_HASKELL__ > 900 + , LedgerSupportsProtocol blk +#endif + ) + => LedgerConfig blk + -> TxMeasure blk + -> LedgerState blk ValuesMK + -- ^ Initial state + -> (Int -> LedgerState blk ValuesMK -> Gen [GenTx blk]) + -- ^ Transaction generator + -> Property +prop_mempoolSequential cfg capacity initialState gTxs = forAllCommands sm0 Nothing $ + \cmds -> monadicIO + (do + (sut, trcr) <- run $ mkSUT cfg initialState + ior <- run $ newTVarIO sut + let sm' = sm sm0 trcr ior + (hist, model, res) <- runCommands sm' cmds + prettyCommands sm0 hist + $ checkCommandNames cmds + $ tabulate "Command sequence length" + [QC.lengthCommands cmds `bucketiseBy` 10] + $ tabulate "Maximum ticket number" + [(\(TicketNo t) -> t) (modelLastSeenTicketNo model) `bucketiseBy` 5] + $ tabulate "Number of txs to add" + [ length txs `bucketiseBy` 10 + | (_, Invocation (Action (TryAddTxs txs)) _) <- unHistory hist + ] + $ res === Ok + ) + where + sm0 = smUnused cfg initialState capacity DontCare gTxs + + bucketiseBy v n = + let + l = (v `div` n) * n + in + "[" <> show l <> "-" <> show (l + n) <> ")" + +prop_mempoolParallel :: + ( HasTxId (GenTx blk) + , blk ~ TestBlock + , LedgerSupportsMempool blk +#if __GLASGOW_HASKELL__ > 900 + , LedgerSupportsProtocol blk +#endif + ) + => LedgerConfig blk + -> TxMeasure blk + -> LedgerState blk ValuesMK + -> MakeAtomic + -> (Int -> LedgerState blk ValuesMK -> Gen [GenTx blk]) + -> Property +prop_mempoolParallel cfg capacity initialState ma gTxs = forAllParallelCommandsNTimes sm0 Nothing 100 $ + \cmds -> monadicIO $ do + (sut, trcr) <- run $ mkSUT cfg initialState + ior <- run $ newTVarIO sut + let sm' = sm sm0 trcr ior + res <- runParallelCommands sm' cmds + prettyParallelCommandsWithOpts + cmds + (Just (GraphOptions "./mempoolParallel.png" Png)) + res + where + sm0 = smUnused cfg initialState capacity ma 
gTxs + +-- | See 'MakeAtomic' on the reasoning behind having these tests. +tests :: TestTree +tests = testGroup "QSM" + [ testProperty "sequential" + $ withMaxSuccess 1000 $ prop_mempoolSequential testLedgerConfigNoSizeLimits txMaxBytes' testInitLedger + $ \i -> fmap (fmap fst . fst) . genTxs i + , testGroup "parallel" + [ testProperty "atomic" + $ withMaxSuccess 1000 $ prop_mempoolParallel testLedgerConfigNoSizeLimits txMaxBytes' testInitLedger Atomic + $ \i -> fmap (fmap fst . fst) . genTxs i + , testProperty "non atomic" + $ withMaxSuccess 10 $ prop_mempoolParallel testLedgerConfigNoSizeLimits txMaxBytes' testInitLedger NonAtomic + $ \i -> fmap (fmap fst . fst) . genTxs i + ] + ] + +{------------------------------------------------------------------------------- + Instances +-------------------------------------------------------------------------------} + +-- | The 'TestBlock' txMaxBytes is fixed to a very high number. We use this +-- local declaration to have a mempool that sometimes fill but still don't make +-- it configurable. +txMaxBytes' :: IgnoringOverflow ByteSize32 +txMaxBytes' = IgnoringOverflow $ ByteSize32 maxBound + +instance (StandardHash blk, GetTip (LedgerState blk)) => + Eq (LedgerState blk ValuesMK) where + (==) = (==) `on` getTip + +instance (UnTick blk, StandardHash blk, GetTip (LedgerState blk)) => + Eq (TickedLedgerState blk ValuesMK) where + (==) = (==) `on` (getTip . unTick) + +instance (StandardHash blk, GetTip (LedgerState blk)) => + Ord (LedgerState blk ValuesMK) where + compare = compare `on` getTip + +instance (Eq (Validated (GenTx blk)), m ~ TxMeasure blk, Eq m) => Eq (TxSeq m (Validated (GenTx blk))) where + s1 == s2 = toList s1 == toList s2 + +instance NoThunks (Mempool IO TestBlock) where + showTypeOf _ = showTypeOf (Proxy @(Mempool IO TestBlock)) + wNoThunks _ _ = return Nothing + +instance ( ToExpr (TxId (GenTx blk)) + , ToExpr (GenTx blk) + , ToExpr (LedgerState blk ValuesMK) + , ToExpr (TickedLedgerState blk ValuesMK) + , LedgerSupportsMempool blk + ) => ToExpr (Model blk r) where + + toExpr model = Rec "Model" $ TD.fromList + [ ("mempoolTip", toExpr $ modelMempoolIntermediateState model) + , ("ledgerTip", toExpr $ modelLedgerDBTip model) + , ("txs", toExpr $ modelTxs model) + , ("size", toExpr $ unByteSize32 $ txMeasureByteSize $ modelCurrentSize model) + , ("capacity", toExpr $ unByteSize32 $ txMeasureByteSize $ modelCapacity model) + , ("lastTicket", toExpr $ modelLastSeenTicketNo model)] + +instance ( ToExpr (TxId (GenTx blk)) + , ToExpr (GenTx blk) + , ToExpr (TickedLedgerState blk ValuesMK) + , ToExpr (LedgerState blk ValuesMK) + , LedgerSupportsMempool blk) => Show (Model blk r) where + show = show . 
toExpr + +instance ToExpr (Action TestBlock r) where + toExpr (TryAddTxs txs) = App "TryAddTxs" $ + [ App (take 8 (tail $ init $ show txid) + <> " " + <> show [ (take 8 (tail $ init $ show a), b) | (a,b) <- Set.toList txins ] + <> " ->> " + <> show [ ( condense a, b) | (_,(a, b)) <- Map.toList txouts ] + <> "") [] | SimpleGenTx tx txid <- txs + , let txins = Mock.txIns tx + , let txouts = Mock.txOuts tx] + toExpr SyncLedger = App "SyncLedger" [] + toExpr GetSnapshot = App "GetSnapshot" [] + +instance ToExpr (LedgerState blk ValuesMK) => ToExpr (Event blk r) where + toExpr (ChangeLedger ls b) = + Rec "ChangeLedger" $ TD.fromList [ ("tip", toExpr ls) + , ("newFork", toExpr b) ] + +instance ToExpr (Command TestBlock r) where + toExpr (Action act) = toExpr act + toExpr (Event ev) = toExpr ev + +instance ToExpr (Command blk r) => Show (Command blk r) where + show = -- unwords . take 2 . words . + show . toExpr + +instance ( ToExpr (GenTx blk) + , LedgerSupportsMempool blk) => ToExpr (Response blk r) where + + toExpr Void = App "Void" [] + toExpr (GotSnapshot s) = + Rec "GotSnapshot" $ + TD.fromList [ ("txs", toExpr s) ] + +instance ( ToExpr (GenTx blk) + , LedgerSupportsMempool blk) => Show (Response blk r) where + show = -- unwords . take 2 . words . + show . toExpr + +deriving instance NoThunks (LedgerState blk ValuesMK) => NoThunks (MockedLedgerDB blk) + +instance Arbitrary (LedgerState TestBlock ValuesMK) where + arbitrary = do + n <- getPositive <$> arbitrary + (txs, _) <- genValidTxs n testInitLedger + case runExcept $ repeatedlyM (flip (applyTxToLedger testLedgerConfigNoSizeLimits)) txs testInitLedger of + Left _ -> error "Must not happen" + Right st -> pure st + +instance ToExpr (TickedLedgerState TestBlock ValuesMK) where + toExpr (TickedSimpleLedgerState st) = App "Ticked" [ toExpr st ] + +instance ToExpr (LedgerState TestBlock ValuesMK) where + toExpr (SimpleLedgerState st tbs) = Rec "LedgerState" $ TD.fromList + [ ("state", toExpr $ mockTip st) + , ("tables", toExpr tbs)] + +instance ToExpr Addr where + toExpr a = App (show a) [] + +deriving instance ToExpr (GenTx TestBlock) +deriving instance ToExpr Tx +deriving instance ToExpr Expiry + +instance ToExpr (LedgerTables (LedgerState TestBlock) ValuesMK) where + toExpr = genericToExpr + +instance ToExpr (ValuesMK TxIn TxOut) where + toExpr (ValuesMK m) = App "Values" [ toExpr m ] + +class UnTick blk where + unTick :: forall mk. 
TickedLedgerState blk mk -> LedgerState blk mk + +instance UnTick TestBlock where + unTick = getTickedSimpleLedgerState diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Util.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Util.hs new file mode 100644 index 0000000000..c00c8504ae --- /dev/null +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/Mempool/Util.hs @@ -0,0 +1,239 @@ +{-# LANGUAGE GADTs #-} +{-# LANGUAGE NumericUnderscores #-} + +module Test.Consensus.Mempool.Util ( + TestBlock + , TestTx + , TestTxError + , TestTxId + , TheMeasure + , applyTxToLedger + , genInvalidTx + , genLargeInvalidTx + , genTxs + , genValidTx + , genValidTxs + , mkTestLedgerConfig + , mustBeValid + , testInitLedger + , testLedgerConfigNoSizeLimits + , txIsValid + ) where + +import Cardano.Binary (Encoding, toCBOR) +import Cardano.Crypto.Hash +import Cardano.Slotting.Slot +import Control.Exception (assert) +import Control.Monad (guard) +import Control.Monad.Except (Except) +import Control.Monad.Trans.Except (runExcept) +import Data.Either (isRight) +import Data.List (nub) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Stack +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.BlockchainTime +import Ouroboros.Consensus.Config.SecurityParam +import qualified Ouroboros.Consensus.HardFork.History as HardFork +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.SupportsMempool +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Mock.Ledger hiding (TxId) +import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..)) +import Ouroboros.Consensus.Protocol.BFT +import Ouroboros.Consensus.Util (safeMaximumOn) +import Test.Crypto.Hash () +import Test.QuickCheck hiding (elements) +import Test.Util.Orphans.IOLike () +import Test.Util.QuickCheck (elements) + +type TestBlock = SimpleBftBlock SimpleMockCrypto BftMockCrypto + +type TestTx = GenTx TestBlock + +type TestTxId = TxId TestTx + +type TestTxError = ApplyTxErr TestBlock + +type TheMeasure = IgnoringOverflow ByteSize32 + +-- There are 5 (core)nodes and each gets 1000. +testInitLedger :: LedgerState TestBlock ValuesMK +testInitLedger = genesisSimpleLedgerState $ mkAddrDist (NumCoreNodes 5) + +-- | Test config +-- +-- (We don't really care about these values here) +mkTestLedgerConfig :: MockConfig -> LedgerConfig TestBlock +mkTestLedgerConfig mockCfg = SimpleLedgerConfig { + simpleMockLedgerConfig = () + , simpleLedgerEraParams = + HardFork.defaultEraParams + (SecurityParam 4) + (slotLengthFromSec 20) + , simpleLedgerMockConfig = mockCfg + } + +testLedgerConfigNoSizeLimits :: LedgerConfig TestBlock +testLedgerConfigNoSizeLimits = mkTestLedgerConfig defaultMockConfig + +-- | Generate a number of valid and invalid transactions and apply the valid +-- transactions to the given 'LedgerState'. The transactions along with a +-- 'Bool' indicating whether its valid ('True') or invalid ('False') and the +-- resulting 'LedgerState' are returned. 
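+--
+-- As a rough usage sketch (the property below is illustrative only, not part
+-- of this module):
+--
+-- > prop_genTxs :: Property
+-- > prop_genTxs =
+-- >   forAll (genTxs 10 testInitLedger) $ \(txs, _ledger') ->
+-- >     length txs === 10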
+genTxs :: Int -- ^ The number of transactions to generate + -> LedgerState TestBlock ValuesMK + -> Gen ([(TestTx, Bool)], LedgerState TestBlock ValuesMK) +genTxs = go [] + where + go txs n ledger + | n <= 0 = return (reverse txs, ledger) + | otherwise = do + valid <- arbitrary + if valid + then do + (validTx, ledger') <- genValidTx ledger + go ((validTx, True):txs) (n - 1) ledger' + else do + invalidTx <- genInvalidTx ledger + go ((invalidTx, False):txs) (n - 1) ledger + +-- | Generate a number of valid transactions and apply these to the given +-- 'LedgerState'. The transactions and the resulting 'LedgerState' are +-- returned. +genValidTxs :: Int -- ^ The number of valid transactions to generate + -> LedgerState TestBlock ValuesMK + -> Gen ([TestTx], LedgerState TestBlock ValuesMK) +genValidTxs = go [] + where + go txs n ledger + | n <= 0 = return (reverse txs, ledger) + | otherwise = do + (tx, ledger') <- genValidTx ledger + go (tx:txs) (n - 1) ledger' + +mustBeValid :: HasCallStack + => Except TestTxError (LedgerState TestBlock ValuesMK) + -> LedgerState TestBlock ValuesMK +mustBeValid ex = case runExcept ex of + Left _ -> error "impossible" + Right ledger -> ledger + +txIsValid :: LedgerConfig TestBlock -> LedgerState TestBlock ValuesMK -> TestTx -> Bool +txIsValid cfg ledgerState tx = + isRight $ runExcept $ applyTxToLedger cfg ledgerState tx + +-- | Generate a valid transaction (but ignoring any per-tx size limits, see Note +-- [Transaction size limit]). +genValidTx :: LedgerState TestBlock ValuesMK -> Gen (TestTx, LedgerState TestBlock ValuesMK) +genValidTx ledgerState@(SimpleLedgerState MockState {} (LedgerTables (ValuesMK utxo))) = do + -- Never let someone go broke, otherwise we risk concentrating all the + -- wealth in one person. That would be problematic (for the society) but + -- also because we wouldn't be able to generate any valid transactions + -- anymore. + + let sender + | Just (richest, _) <- safeMaximumOn snd $ Map.toList $ + sum . map snd <$> peopleWithFunds + = richest + | otherwise + = error "no people with funds" + + recipient <- elements $ filter (/= sender) $ Map.keys peopleWithFunds + let assets = peopleWithFunds Map.! sender + fortune = sum (map snd assets) + ins = Set.fromList $ map fst assets + + -- At most spent half of someone's fortune + amount <- choose (1, fortune `div` 2) + let outRecipient = (recipient, amount) + outs + | amount == fortune + = [outRecipient] + | otherwise + = [outRecipient, (sender, fortune - amount)] + tx = mkSimpleGenTx $ Tx DoNotExpire ins outs + return (tx, mustBeValid (applyTxToLedger testLedgerConfigNoSizeLimits ledgerState tx)) + where + peopleWithFunds :: Map Addr [(TxIn, Amount)] + peopleWithFunds = Map.unionsWith (<>) + [ Map.singleton addr [(txIn, amount)] + | (txIn, (addr, amount)) <- Map.toList utxo + ] + +genInvalidTx :: LedgerState TestBlock ValuesMK -> Gen TestTx +genInvalidTx ledgerState = do + let peopleWithFunds = nub $ map fst $ Map.elems utxo + sender <- elements peopleWithFunds + recipient <- elements $ filter (/= sender) peopleWithFunds + let assets = filter (\(_, (addr, _)) -> addr == sender) $ Map.toList utxo + ins = Set.fromList $ map fst assets + -- There is only 5 000 in 'testInitLedger', so any transaction spending + -- more than 5 000 is invalid. 
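+    -- As a back-of-the-envelope check (assuming the 'testInitLedger'
+    -- distribution of 5 core nodes with 1 000 each):
+    --
+    -- > totalFunds = 5 * 1_000     -- 5 000 in total
+    -- > amount     >= 5_001        -- always exceeds totalFunds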
+ amount <- choose (5_001, 10_000) + let outs = [(recipient, amount)] + tx = mkSimpleGenTx $ Tx DoNotExpire ins outs + return $ assert (not (txIsValid testLedgerConfigNoSizeLimits ledgerState tx)) tx + + where + SimpleLedgerState { + simpleLedgerTables = LedgerTables (ValuesMK utxo) + } = ledgerState + +-- | Generate an invalid tx that is larger than the given measure. +genLargeInvalidTx :: TheMeasure -> Gen TestTx +genLargeInvalidTx (IgnoringOverflow sz) = go Set.empty + where + go ins = case isLargeTx ins of + Just tx -> pure tx + Nothing -> do + newTxIn <- arbitrary + go (Set.insert newTxIn ins) + + isLargeTx :: Set TxIn -> Maybe TestTx + isLargeTx ins = do + let outs = [] + tx = mkSimpleGenTx $ Tx DoNotExpire ins outs + guard $ genTxSize tx > sz + pure tx + +-- | Apply a transaction to the ledger +-- +-- We don't have blocks in this test, but transactions only. In this function +-- we pretend the transaction /is/ a block, apply it to the UTxO, and then +-- update the tip of the ledger state, incrementing the slot number and faking +-- a hash. +applyTxToLedger :: LedgerConfig TestBlock + -> LedgerState TestBlock ValuesMK + -> TestTx + -> Except TestTxError (LedgerState TestBlock ValuesMK) +applyTxToLedger cfg st tx = + let SimpleLedgerState mockState _ = stowLedgerTables st in + unstowLedgerTables . mkNewLedgerState <$> updateMockUTxO mockCfg dummy tx mockState + where + mockCfg = simpleLedgerMockConfig cfg + + -- All expiries in this test are 'DoNotExpire', so the current time is + -- irrelevant. + dummy :: SlotNo + dummy = 0 + + mkNewLedgerState mockState' = + SimpleLedgerState mockState' { mockTip = BlockPoint slot' hash' } emptyLedgerTables + + slot' = case pointSlot $ mockTip (simpleLedgerState st) of + Origin -> 0 + NotOrigin s -> succ s + + -- A little trick to instantiate the phantom parameter of 'Hash' (and + -- 'HeaderHash') with 'TestBlock' while actually hashing the slot number: + -- use a custom serialiser to instantiate the phantom type parameter with + -- @Header TestBlock@, but actually encode the slot number instead. 
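+    -- A minimal sketch of how this function is meant to be chained, mirroring
+    -- the 'Arbitrary' instance for 'LedgerState TestBlock ValuesMK':
+    --
+    -- > runExcept $ repeatedlyM (flip (applyTxToLedger testLedgerConfigNoSizeLimits)) txs testInitLedger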
+ hash' :: HeaderHash TestBlock + hash' = hashWithSerialiser fakeEncodeHeader (error "fake header") + + fakeEncodeHeader :: Header TestBlock -> Encoding + fakeEncodeHeader _ = toCBOR slot' diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/BlockFetch/Client.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/BlockFetch/Client.hs index c03639a6bb..49d3d64c1f 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/BlockFetch/Client.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/BlockFetch/Client.hs @@ -23,6 +23,7 @@ module Test.Consensus.MiniProtocol.BlockFetch.Client (tests) where import Control.Monad (replicateM) +import Control.Monad.Base import Control.Monad.Class.MonadTime import Control.Monad.Class.MonadTimer.SI (MonadTimer) import Control.Monad.IOSim (runSimOrThrow) @@ -43,7 +44,7 @@ import qualified Ouroboros.Consensus.MiniProtocol.BlockFetch.ClientInterface as import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..)) import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDBImpl -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB +import Ouroboros.Consensus.Storage.ChainDB.Impl.Args import Ouroboros.Consensus.Util.Condense (Condense (..)) import Ouroboros.Consensus.Util.IOLike import Ouroboros.Consensus.Util.STM (blockUntilJust, @@ -119,7 +120,7 @@ data BlockFetchClientOutcome = BlockFetchClientOutcome { runBlockFetchTest :: forall m. - (IOLike m, MonadTime m, MonadTimer m) + (IOLike m, MonadTime m, MonadTimer m, MonadBase m m) => BlockFetchClientTestSetup -> m BlockFetchClientOutcome runBlockFetchTest BlockFetchClientTestSetup{..} = withRegistry \registry -> do @@ -244,7 +245,7 @@ runBlockFetchTest BlockFetchClientTestSetup{..} = withRegistry \registry -> do , mcdbRegistry = registry , mcdbNodeDBs = nodeDBs } - pure $ ChainDB.updateTracer cdbTracer args + pure $ updateTracer cdbTracer args (_, (chainDB, ChainDBImpl.Internal{intAddBlockRunner})) <- allocate registry diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs index 60c5ebdc31..3cf5035873 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/ChainSync/Client.hs @@ -746,10 +746,10 @@ computePastLedger :: TopLevelConfig TestBlock -> Point TestBlock -> Chain TestBlock - -> Maybe (ExtLedgerState TestBlock) + -> Maybe (ExtLedgerState TestBlock EmptyMK) computePastLedger cfg pt chain | pt `elem` validPoints - = Just $ go testInitExtLedger (Chain.toOldestFirst chain) + = Just $ go (convertMapKind testInitExtLedger) (Chain.toOldestFirst chain) | otherwise = Nothing where @@ -769,12 +769,12 @@ computePastLedger cfg pt chain -- matching @pt@, after which we return the resulting ledger. -- -- PRECONDITION: @pt@ is in the list of blocks or genesis. 
- go :: ExtLedgerState TestBlock -> [TestBlock] -> ExtLedgerState TestBlock + go :: ExtLedgerState TestBlock EmptyMK -> [TestBlock] -> ExtLedgerState TestBlock EmptyMK go !st blks | castPoint (getTip st) == pt = st | blk:blks' <- blks - = go (tickThenReapply (ExtLedgerCfg cfg) blk st) blks' + = go (convertMapKind $ tickThenReapply (ExtLedgerCfg cfg) blk (convertMapKind st)) blks' | otherwise = error "point not in the list of blocks" @@ -785,7 +785,7 @@ computeHeaderStateHistory :: -> HeaderStateHistory TestBlock computeHeaderStateHistory cfg = HeaderStateHistory.trim (fromIntegral k) - . HeaderStateHistory.fromChain cfg testInitExtLedger + . HeaderStateHistory.fromChain cfg (convertMapKind testInitExtLedger) where SecurityParam k = configSecurityParam cfg diff --git a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/LocalStateQuery/Server.hs b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/LocalStateQuery/Server.hs index 5310647fd7..79d3639f91 100644 --- a/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/LocalStateQuery/Server.hs +++ b/ouroboros-consensus/test/consensus-test/Test/Consensus/MiniProtocol/LocalStateQuery/Server.hs @@ -1,5 +1,6 @@ {-# LANGUAGE GADTs #-} {-# LANGUAGE LambdaCase #-} +{-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE ScopedTypeVariables #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -20,10 +21,13 @@ module Test.Consensus.MiniProtocol.LocalStateQuery.Server (tests) where import Cardano.Crypto.DSIGN.Mock +import Control.Concurrent.Class.MonadSTM.Strict.TMVar +import Control.Monad.Base import Control.Monad.IOSim (runSimOrThrow) -import Control.Tracer (nullTracer) -import Data.Map.Strict (Map) +import Control.ResourceRegistry +import Control.Tracer import qualified Data.Map.Strict as Map +import Data.Maybe (fromMaybe) import Network.TypedProtocol.Stateful.Proofs (connect) import Ouroboros.Consensus.Block import Ouroboros.Consensus.BlockchainTime @@ -36,14 +40,14 @@ import Ouroboros.Consensus.Node.ProtocolInfo (NumCoreNodes (..)) import Ouroboros.Consensus.NodeId import Ouroboros.Consensus.Protocol.BFT import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache as BlockCache -import Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB (LgrDB, - LgrDbArgs (..), mkLgrDB) -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.LgrDB as LgrDB -import Ouroboros.Consensus.Storage.LedgerDB (configLedgerDb, - defaultDiskPolicyArgs) -import qualified Ouroboros.Consensus.Storage.LedgerDB as LgrDB (ledgerDbPast, - ledgerDbTip, ledgerDbWithAnchor) -import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream hiding + (streamAPI) +import Ouroboros.Consensus.Storage.LedgerDB (LedgerDB') +import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Util.IOLike hiding (newTVarIO) import Ouroboros.Network.Mock.Chain (Chain (..)) import qualified Ouroboros.Network.Mock.Chain as Chain import Ouroboros.Network.Protocol.LocalStateQuery.Client @@ -52,7 +56,9 @@ import Ouroboros.Network.Protocol.LocalStateQuery.Examples import Ouroboros.Network.Protocol.LocalStateQuery.Server import Ouroboros.Network.Protocol.LocalStateQuery.Type (AcquireFailure (..), State (..), Target (..)) -import System.FS.API (HasFS, SomeHasFS (..)) +import System.FS.API (SomeHasFS (..)) +import qualified System.FS.Sim.MockFS as 
MockFS +import System.FS.Sim.STM import Test.QuickCheck hiding (Result) import Test.Tasty import Test.Tasty.QuickCheck @@ -73,7 +79,7 @@ tests = testGroup "LocalStateQueryServer" -------------------------------------------------------------------------------} -- | Plan: --- * Preseed the LgrDB of the server with the preferred chain of the +-- * Preseed the LedgerDB of the server with the preferred chain of the -- 'BlockTree'. -- * Acquire for each block in the 'BlockTree', including the ones not on the -- chain, a state and send the 'QueryLedgerTip'. Collect these results. @@ -95,10 +101,11 @@ prop_localStateQueryServer k bt p (Positive (Small n)) = checkOutcome k chain ac replicate n VolatileTip ++ (SpecificPoint . blockPoint <$> treeToBlocks bt) + actualOutcome :: [(Target (Point TestBlock), Either AcquireFailure (Point TestBlock))] - actualOutcome = runSimOrThrow $ do + actualOutcome = runSimOrThrow $ withRegistry $ \rr ->do let client = mkClient points - server <- mkServer k chain + server <- mkServer rr k chain (\(a, _, _) -> a) <$> connect StateIdle @@ -114,7 +121,7 @@ prop_localStateQueryServer k bt p (Positive (Small n)) = checkOutcome k chain ac -- whether the results are correct. -- -- NOTE: when we don't get an 'AcquireFailure', even though we expected it, we --- accept it. This is because the LgrDB may contain snapshots for blocks on +-- accept it. This is because the LedgerDB may contain snapshots for blocks on -- the current chain older than @k@, but we do not want to imitate such -- implementation details. -- @@ -149,7 +156,7 @@ checkOutcome k chain = conjoin . map (uncurry checkResult) | pointSlot pt >= immutableSlot -> counterexample ("Point " <> show pt <> - " newer than the immutable tip, but got AcquireFailurePointTooOld") + " newer or equal than the immutable tip " <> show immutableSlot <>", but got AcquireFailurePointTooOld") (property False) | otherwise -> tabulate "Acquired" ["AcquireFailurePointTooOld"] $ property True @@ -160,8 +167,8 @@ checkOutcome k chain = conjoin . map (uncurry checkResult) Right _result -> tabulate "Acquired" ["Success"] True Left failure -> counterexample ("acquire tip point resulted in " ++ show failure) False -mkClient - :: Monad m +mkClient :: + Monad m => [Target (Point TestBlock)] -> LocalStateQueryClient TestBlock @@ -172,64 +179,70 @@ mkClient mkClient points = localStateQueryClient [(pt, BlockQuery QueryLedgerTip) | pt <- points] mkServer :: - IOLike m - => SecurityParam + (IOLike m, MonadBase m m) + => ResourceRegistry m + -> SecurityParam -> Chain TestBlock -> m (LocalStateQueryServer TestBlock (Point TestBlock) (Query TestBlock) m ()) -mkServer k chain = do - lgrDB <- initLgrDB k chain +mkServer rr k chain = do + lgrDB <- initLedgerDB k chain return $ localStateQueryServer cfg - (castPoint . LgrDB.ledgerDbTip <$> LgrDB.getCurrent lgrDB) - (\pt -> LgrDB.ledgerDbPast pt <$> LgrDB.getCurrent lgrDB) - getImmutablePoint + (LedgerDB.getReadOnlyForker lgrDB rr) where cfg = ExtLedgerCfg $ testCfg k - getImmutablePoint = return $ Chain.headPoint $ - Chain.drop (fromIntegral (maxRollbacks k)) chain --- | Initialise a 'LgrDB' with the given chain. -initLgrDB :: - forall m. 
IOLike m - => SecurityParam - -> Chain TestBlock - -> m (LgrDB m TestBlock) -initLgrDB k chain = do - varDB <- newTVarIO genesisLedgerDB - varPrevApplied <- newTVarIO mempty - let lgrDB = mkLgrDB varDB varPrevApplied resolve args k - LgrDB.validate lgrDB genesisLedgerDB BlockCache.empty 0 noopTrace - (map getHeader (Chain.toOldestFirst chain)) >>= \case - LgrDB.ValidateExceededRollBack _ -> - error "impossible: rollback was 0" - LgrDB.ValidateLedgerError _ -> - error "impossible: there were no invalid blocks" - LgrDB.ValidateSuccessful ledgerDB' -> do - atomically $ LgrDB.setCurrent lgrDB ledgerDB' - return lgrDB +streamAPI :: forall m. IOLike m => StreamAPI m TestBlock TestBlock +streamAPI = StreamAPI {streamAfter} where - resolve :: RealPoint TestBlock -> m TestBlock - resolve = return . (blockMapping Map.!) - - blockMapping :: Map (RealPoint TestBlock) TestBlock - blockMapping = Map.fromList - [(blockRealPoint b, b) | b <- Chain.toOldestFirst chain] + streamAfter :: + Point TestBlock + -> (Either (RealPoint TestBlock) (m (NextItem TestBlock)) -> m a) + -> m a + streamAfter _ k = do + k (Right (pure NoMoreItems)) - cfg = configLedgerDb $ testCfg k - - genesisLedgerDB = LgrDB.ledgerDbWithAnchor testInitExtLedger +-- | Initialise a 'LedgerDB' with the given chain. +initLedgerDB :: + (IOLike m, MonadBase m m) + => SecurityParam + -> Chain TestBlock + -> m (LedgerDB' m TestBlock) +initLedgerDB s c = do + reg <- unsafeNewRegistry + fs <- newTMVarIO MockFS.empty + let snapshotPolicyArgs = SnapshotPolicyArgs + { spaInterval = DefaultSnapshotInterval + , spaNum = DefaultNumOfDiskSnapshots + } + args = LedgerDbArgs + { lgrSnapshotPolicyArgs = snapshotPolicyArgs + , lgrHasFS = SomeHasFS $ simHasFS fs + , lgrGenesis = return testInitExtLedger + , lgrTracer = nullTracer + , lgrFlavorArgs = LedgerDbFlavorArgsV1 $ V1Args DefaultFlushFrequency DefaultQueryBatchSize InMemoryBackingStoreArgs + , lgrConfig = LedgerDB.configLedgerDb $ testCfg s + , lgrRegistry = reg + , lgrStartSnapshot = Nothing + } + ldb <- fst <$> LedgerDB.openDB + args + streamAPI + (Chain.headPoint c) + (\rpt -> pure $ fromMaybe (error "impossible") $ Chain.findBlock ((rpt ==) . 
blockRealPoint) c) - noopTrace :: blk -> m () - noopTrace = const $ pure () + result <- LedgerDB.validate ldb reg (const $ pure ()) BlockCache.empty 0 (map getHeader $ Chain.toOldestFirst c) + case result of + LedgerDB.ValidateSuccessful forker -> do + atomically $ LedgerDB.forkerCommit forker + LedgerDB.forkerClose forker + LedgerDB.ValidateExceededRollBack _ -> + error "impossible: rollback was 0" + LedgerDB.ValidateLedgerError _ -> + error "impossible: there were no invalid blocks" - args = LgrDbArgs - { lgrConfig = cfg - , lgrHasFS = SomeHasFS (error "lgrHasFS" :: HasFS m ()) - , lgrDiskPolicyArgs = defaultDiskPolicyArgs - , lgrGenesis = return testInitExtLedger - , lgrTracer = nullTracer - } + pure ldb testCfg :: SecurityParam -> TopLevelConfig TestBlock testCfg securityParam = TopLevelConfig { diff --git a/ouroboros-consensus/test/storage-test/Main.hs b/ouroboros-consensus/test/storage-test/Main.hs index 6b3986e1b6..e06d57b050 100644 --- a/ouroboros-consensus/test/storage-test/Main.hs +++ b/ouroboros-consensus/test/storage-test/Main.hs @@ -1,11 +1,19 @@ +{-# LANGUAGE NumericUnderscores #-} module Main (main) where +import Control.Concurrent (threadDelay) +import Control.Concurrent.Async (race_) +import Control.Monad (forever) +import System.IO (hFlush, stdout) import qualified Test.Ouroboros.Storage import Test.Tasty import Test.Util.TestEnv main :: IO () -main = defaultMainWithTestEnv defaultTestEnvConfig tests +main = runTests `race_` heartbeat + where + runTests = defaultMainWithTestEnv defaultTestEnvConfig tests + heartbeat = forever $ threadDelay (30 * 1_000_000) >> putChar '.' >> hFlush stdout tests :: TestTree tests = testGroup "ouroboros-storage" [ diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/FollowerPromptness.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/FollowerPromptness.hs index 5b224712c9..79944f9e4a 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/FollowerPromptness.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/FollowerPromptness.hs @@ -20,6 +20,7 @@ module Test.Ouroboros.Storage.ChainDB.FollowerPromptness (tests) where import Control.Monad (forever) +import Control.Monad.Base import Control.Monad.IOSim (runSimOrThrow) import Control.ResourceRegistry import Control.Tracer (Tracer (..), contramapM, traceWith) @@ -35,7 +36,7 @@ import Ouroboros.Consensus.Storage.ChainDB.API (ChainDB) import qualified Ouroboros.Consensus.Storage.ChainDB.API as ChainDB import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as Punishment import qualified Ouroboros.Consensus.Storage.ChainDB.Impl as ChainDBImpl -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB +import Ouroboros.Consensus.Storage.ChainDB.Impl.Args import Ouroboros.Consensus.Util.Condense (Condense (..)) import Ouroboros.Consensus.Util.Enclose import Ouroboros.Consensus.Util.IOLike @@ -112,7 +113,7 @@ data FollowerPromptnessOutcome = FollowerPromptnessOutcome { } runFollowerPromptnessTest :: - forall m. IOLike m + forall m. 
(IOLike m, MonadBase m m) => FollowerPromptnessTestSetup -> m FollowerPromptnessOutcome runFollowerPromptnessTest FollowerPromptnessTestSetup{..} = withRegistry \registry -> do @@ -168,13 +169,13 @@ runFollowerPromptnessTest FollowerPromptnessTestSetup{..} = withRegistry \regist -> m (ChainDB m TestBlock) openChainDB registry cdbTracer = do chainDbArgs <- do - let mcdbTopLevelConfig = singleNodeTestConfigWithK securityParam - mcdbChunkInfo = mkTestChunkInfo mcdbTopLevelConfig - mcdbInitLedger = testInitExtLedger - mcdbRegistry = registry + let mcdbTopLevelConfig = singleNodeTestConfigWithK securityParam + mcdbChunkInfo = mkTestChunkInfo mcdbTopLevelConfig + mcdbInitLedger = testInitExtLedger + mcdbRegistry = registry mcdbNodeDBs <- emptyNodeDBs let cdbArgs = fromMinimalChainDbArgs MinimalChainDbArgs{..} - pure $ ChainDB.updateTracer cdbTracer cdbArgs + pure $ updateTracer cdbTracer cdbArgs (_, (chainDB, ChainDBImpl.Internal{intAddBlockRunner})) <- allocate registry diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model.hs index 0e277dde9d..0fa1096b73 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model.hs @@ -30,8 +30,8 @@ module Test.Ouroboros.Storage.ChainDB.Model ( , getBlock , getBlockByPoint , getBlockComponentByPoint + , getDbChangelog , getIsValid - , getLedgerDB , getLoEFragment , getMaxSlotNo , hasBlock @@ -109,7 +109,9 @@ import Ouroboros.Consensus.Storage.ChainDB.API (AddBlockPromise (..), StreamFrom (..), StreamTo (..), UnknownRange (..), validBounds) import Ouroboros.Consensus.Storage.ChainDB.Impl.ChainSel (olderThanK) -import Ouroboros.Consensus.Storage.LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.API.Config + (LedgerDbCfg (..)) +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog as DbChangelog import Ouroboros.Consensus.Util (repeatedly) import qualified Ouroboros.Consensus.Util.AnchoredFragment as Fragment import Ouroboros.Consensus.Util.IOLike (MonadSTM) @@ -132,8 +134,8 @@ data Model blk = Model { , immutableDbChain :: Chain blk -- ^ The ImmutableDB , cps :: CPS.ChainProducerState blk - , currentLedger :: ExtLedgerState blk - , initLedger :: ExtLedgerState blk + , currentLedger :: ExtLedgerState blk EmptyMK + , initLedger :: ExtLedgerState blk EmptyMK , iterators :: Map IteratorId [blk] , valid :: Set (HeaderHash blk) , invalid :: InvalidBlocks blk @@ -150,11 +152,11 @@ deriving instance ( ToExpr blk , ToExpr (HeaderHash blk) , ToExpr (ChainDepState (BlockProtocol blk)) , ToExpr (TipInfo blk) - , ToExpr (LedgerState blk) + , ToExpr (LedgerState blk EmptyMK) , ToExpr (ExtValidationError blk) , ToExpr (Chain blk) , ToExpr (ChainProducerState blk) - , ToExpr (ExtLedgerState blk) + , ToExpr (ExtLedgerState blk EmptyMK) ) => ToExpr (Model blk) @@ -335,15 +337,17 @@ isValid :: forall blk. 
LedgerSupportsProtocol blk -> Maybe Bool isValid = flip getIsValid -getLedgerDB :: - LedgerSupportsProtocol blk +getDbChangelog :: + (LedgerSupportsProtocol blk, LedgerTablesAreTrivial (LedgerState blk)) => TopLevelConfig blk -> Model blk - -> LedgerDB (ExtLedgerState blk) -getLedgerDB cfg m@Model{..} = - ledgerDbPrune (SecurityParam (maxActualRollback k m)) - $ ledgerDbPushMany' ledgerDbCfg blks - $ ledgerDbWithAnchor initLedger + -> DbChangelog.DbChangelog' blk +getDbChangelog cfg m@Model{..} = + DbChangelog.onChangelog + ( DbChangelog.prune (SecurityParam (maxActualRollback k m)) + . DbChangelog.reapplyThenPushMany' ledgerDbCfg blks DbChangelog.trivialKeySetsReader + ) + $ DbChangelog.empty initLedger where blks = Chain.toOldestFirst $ currentChain m @@ -364,7 +368,7 @@ getLoEFragment = loeFragment empty :: HasHeader blk => LoE () - -> ExtLedgerState blk + -> ExtLedgerState blk EmptyMK -> Model blk empty loe initLedger = Model { volatileDbBlocks = Map.empty @@ -379,7 +383,7 @@ empty loe initLedger = Model { , loeFragment = loe $> Fragment.Empty Fragment.AnchorGenesis } -addBlock :: forall blk. LedgerSupportsProtocol blk +addBlock :: forall blk. (LedgerSupportsProtocol blk, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> blk -> Model blk -> Model blk @@ -401,9 +405,14 @@ addBlock cfg blk m -- If it's an invalid block we've seen before, ignore it. Map.member (blockHash blk) (invalid m) -chainSelection :: forall blk. LedgerSupportsProtocol blk - => TopLevelConfig blk - -> Model blk -> Model blk +chainSelection :: + forall blk. + ( LedgerTablesAreTrivial (ExtLedgerState blk) + , LedgerSupportsProtocol blk + ) + => TopLevelConfig blk + -> Model blk + -> Model blk chainSelection cfg m = Model { volatileDbBlocks = volatileDbBlocks m , immutableDbChain = immutableDbChain m @@ -422,7 +431,7 @@ chainSelection cfg m = Model { -- @invalid'@ will be a (non-strict) superset of the previous value of -- @invalid@, see 'validChains', thus no need to union. invalid' :: InvalidBlocks blk - candidates :: [(Chain blk, ExtLedgerState blk)] + candidates :: [(Chain blk, ExtLedgerState blk EmptyMK)] (invalid', candidates) = validChains cfg m (blocks m) immutableChainHashes = @@ -500,7 +509,7 @@ chainSelection cfg m = Model { volatileFrag = volatileChain secParam id m newChain :: Chain blk - newLedger :: ExtLedgerState blk + newLedger :: ExtLedgerState blk EmptyMK (newChain, newLedger) = fromMaybe (currentChain m, currentLedger m) . selectChain @@ -519,7 +528,104 @@ chainSelection cfg m = Model { (Set.fromList . map blockHash . Chain.toOldestFirst . fst) consideredCandidates -addBlocks :: LedgerSupportsProtocol blk +-- = Getting the valid blocks +-- +-- The chain selection algorithms implemented by the model and by the SUT differ +-- but have the same outcome.We illustrate this with an example. Imagine having +-- the following candidate chains where @v@ represents a valid block and @x@ +-- represents an invalid block: +-- +-- > C0: vvvvvxxxxx +-- > C1: vvvvvvvx +-- > C2: vvv +-- +-- For candidate Cx, we will call CxV the valid prefix and CxI the invalid suffix. +-- +-- The chain selection algorithm will run whenever we add a block, although it +-- will only select a new chain when adding a block results in a chain that is +-- longer than the currently selected chain. Note that the chain selection +-- algorithm doesn't know beforehand the validity of the blocks in the +-- candidates. The process it follows will be: +-- +-- 1. Sort the chains by 'SelectView'. 
Note that for Praos this will trivially
+-- imply first considering the candidates by length.
+--
+-- > sortedCandidates == [C0, C1, C2]
+--
+-- 2. Until a candidate is found to be valid and longer than the currently selected
+--    chain, take the head of the (sorted) list of candidates and validate the
+--    blocks in it one by one.
+--
+--    If a block in the candidate is found to be invalid, the candidate is
+--    truncated, added back to the list, and the algorithm starts again at step 1.
+--    The valid blocks in the candidate are recorded in the set of known-valid
+--    blocks, so that the next time they are applied, it is known that applying
+--    said block can't fail and therefore some checks can be skipped. The invalid
+--    blocks in the candidate are recorded in the set of known-invalid blocks so
+--    that they are not applied again.
+--
+--    The steps in the example are as follows:
+--
+--    1. Start with the sorted candidate chains: [C0, C1, C2]
+--    2. Validate the first chain, C0, resulting in C0V and C0I.
+--    3. Append C0V to the list of remaining candidates: [C1, C2] ++ [C0V]
+--    4. Add the valid blocks to the state:
+--       > knownValid = append C0V knownValid
+--    5. Add the invalid blocks to the state:
+--       > knownInvalid = append C0I knownInvalid
+--    6. Re-sort the list:
+--       > sortBy `selectView` [C1, C2, C0V] == [C1, C0V, C2]
+--    7. Validate the first chain, C1, resulting in C1V and C1I.
+--    8. Append C1V to the list of remaining candidates: [C0V, C2] ++ [C1V]
+--    9. Add the valid blocks to the state:
+--       > knownValid = append C1V knownValid
+--    10. Add the invalid blocks to the state:
+--        > knownInvalid = append C1I knownInvalid
+--    11. Re-sort the list:
+--        > sortBy `selectView` [C0V, C2, C1V] == [C1V, C0V, C2]
+--    12. Validate the first chain, C1V, which is fully valid and returned.
+--
+-- 3. If such a candidate is found, the algorithm will return it as a result.
+--    Otherwise, the algorithm will return 'Nothing'.
+--
+-- > chainSelection [C0, C1, C2] = Just C1V
+--
+-- On the other hand, the chain selection of the model takes some shortcuts to
+-- achieve the same result:
+--
+-- 1. 'validChains' will return the list of candidates sorted by 'SelectView',
+--    with each candidate truncated to its valid prefix.
+--
+-- > validChains [C0, C1, C2] = (invalid == C0I + C1I, candidates == [C0V, C1V, C2])
+--
+-- 2. 'selectChain' will sort the chains by 'SelectView', but note that now it will
+--    use the 'SelectView' of the already truncated candidate.
+--
+-- > selectChain [C0V, C1V, C2] = listToMaybe (sortBy `selectView` [C0V, C1V, C2])
+-- >                            = listToMaybe ([C1V, C0V, C2])
+-- >                            = Just C1V
+--
+-- The selected candidate will be the same one that the chain selection
+-- algorithm would choose. However, as the chain selection algorithm will
+-- consider the candidates as they were sorted by 'SelectView' on the
+-- non-truncated candidates, blocks in 'C0V' are also considered valid by the
+-- real algorithm.
+--
+-- To obtain a set of valid blocks that mirrors the one from the
+-- real algorithm, the model can process the list of candidates returned by
+-- 'validChains' until it finds the one 'selectChain' chose, as these will be
+-- the ones that the real algorithm would test and re-add to the list once
+-- truncated. 
+-- +-- > knownInvalid = append (C0I + C1I) knownInvalid +-- > knownValid = foldl append knownValid (takeWhile (/= C1V) candidates ++ [C1V]) +-- +-- Note that the set of known valid blocks is equivalent to the set computed +-- by real algorithm, but the set of known invalid blocks is a superset of +-- the ones known by the real algorithm. See the note +-- Ouroboros.Storage.ChainDB.StateMachine.[Invalid blocks]. + +addBlocks :: (LedgerSupportsProtocol blk, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> [blk] -> Model blk -> Model blk @@ -527,7 +633,7 @@ addBlocks cfg = repeatedly (addBlock cfg) -- | Wrapper around 'addBlock' that returns an 'AddBlockPromise'. addBlockPromise :: - forall m blk. (LedgerSupportsProtocol blk, MonadSTM m) + forall m blk. (LedgerSupportsProtocol blk, MonadSTM m, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> blk -> Model blk @@ -545,7 +651,10 @@ addBlockPromise cfg blk m = (result, m') -- | Update the LoE fragment, trigger chain selection and return the new tip -- point. updateLoE :: - forall blk. LedgerSupportsProtocol blk + forall blk. + ( LedgerTablesAreTrivial (ExtLedgerState blk) + , LedgerSupportsProtocol blk + ) => TopLevelConfig blk -> AnchoredFragment blk -> Model blk @@ -714,7 +823,7 @@ type InvalidBlocks blk = Map (HeaderHash blk) (ExtValidationError blk, SlotNo) data ValidatedChain blk = ValidatedChain (Chain blk) -- ^ Valid prefix - (ExtLedgerState blk) -- ^ Corresponds to the tip of the valid prefix + (ExtLedgerState blk EmptyMK) -- ^ Corresponds to the tip of the valid prefix (InvalidBlocks blk) -- ^ Invalid blocks encountered while validating -- the candidate chain. @@ -722,7 +831,7 @@ data ValidatedChain blk = -- -- The 'InvalidBlocks' in the returned 'ValidatedChain' will be >= the -- 'invalid' of the given 'Model'. -validate :: forall blk. LedgerSupportsProtocol blk +validate :: forall blk. (LedgerSupportsProtocol blk, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> Model blk -> Chain blk @@ -734,14 +843,14 @@ validate cfg Model { initLedger, invalid } chain = mkInvalid b reason = Map.singleton (blockHash b) (reason, blockSlot b) - go :: ExtLedgerState blk -- ^ Corresponds to the tip of the valid prefix + go :: ExtLedgerState blk EmptyMK -- ^ Corresponds to the tip of the valid prefix -> Chain blk -- ^ Valid prefix -> [blk] -- ^ Remaining blocks to validate -> ValidatedChain blk go ledger validPrefix = \case -- Return 'mbFinal' if it contains an "earlier" result [] -> ValidatedChain validPrefix ledger invalid - b:bs' -> case runExcept (tickThenApply (ExtLedgerCfg cfg) b ledger) of + b:bs' -> case runExcept (tickThenApply (ExtLedgerCfg cfg) b (convertMapKind ledger)) of -- Invalid block according to the ledger Left e -> ValidatedChain @@ -759,7 +868,7 @@ validate cfg Model { initLedger, invalid } chain = -- This is the good path | otherwise - -> go ledger' (validPrefix :> b) bs' + -> go (convertMapKind ledger') (validPrefix :> b) bs' chains :: forall blk. (GetPrevHash blk) => Map (HeaderHash blk) blk -> [Chain blk] @@ -782,11 +891,11 @@ chains bs = go Chain.Genesis fwd :: Map (ChainHash blk) (Map (HeaderHash blk) blk) fwd = successors (Map.elems bs) -validChains :: forall blk. LedgerSupportsProtocol blk +validChains :: forall blk. 
(LedgerSupportsProtocol blk, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> Model blk -> Map (HeaderHash blk) blk - -> (InvalidBlocks blk, [(Chain blk, ExtLedgerState blk)]) + -> (InvalidBlocks blk, [(Chain blk, ExtLedgerState blk EmptyMK)]) validChains cfg m bs = foldMap (classify . validate cfg m) $ -- Note that we sort here to make sure we pick the same chain as the real @@ -815,7 +924,7 @@ validChains cfg m bs = ) classify :: ValidatedChain blk - -> (InvalidBlocks blk, [(Chain blk, ExtLedgerState blk)]) + -> (InvalidBlocks blk, [(Chain blk, ExtLedgerState blk EmptyMK)]) classify (ValidatedChain chain ledger invalid) = (invalid, [(chain, ledger)]) @@ -1004,7 +1113,7 @@ reopen :: Model blk -> Model blk reopen m = m { isOpen = True } wipeVolatileDB :: - forall blk. LedgerSupportsProtocol blk + forall blk. (LedgerSupportsProtocol blk, LedgerTablesAreTrivial (ExtLedgerState blk)) => TopLevelConfig blk -> Model blk -> (Point blk, Model blk) @@ -1025,7 +1134,7 @@ wipeVolatileDB cfg m = -- Get the chain ending at the ImmutableDB by doing chain selection on the -- sole candidate (or none) in the ImmutableDB. newChain :: Chain blk - newLedger :: ExtLedgerState blk + newLedger :: ExtLedgerState blk EmptyMK (newChain, newLedger) = isSameAsImmutableDbChain $ selectChain diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model/Test.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model/Test.hs index 631c4c593d..26ad9fb37a 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model/Test.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Model/Test.hs @@ -27,6 +27,7 @@ module Test.Ouroboros.Storage.ChainDB.Model.Test (tests) where import GHC.Stack import Ouroboros.Consensus.Block import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Tables import Ouroboros.Consensus.Storage.ChainDB.API (LoE (..), StreamFrom (..), StreamTo (..)) import qualified Ouroboros.Consensus.Util.AnchoredFragment as AF @@ -49,7 +50,7 @@ tests = testGroup "Model" [ ] addBlocks :: LoE () -> [TestBlock] -> M.Model TestBlock -addBlocks loe blks = M.addBlocks cfg blks (M.empty loe testInitExtLedger) +addBlocks loe blks = M.addBlocks cfg blks (M.empty loe (convertMapKind testInitExtLedger)) where cfg = singleNodeTestConfig diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/StateMachine.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/StateMachine.hs index eed9f661ea..01e3339009 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/StateMachine.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/StateMachine.hs @@ -70,6 +70,7 @@ module Test.Ouroboros.Storage.ChainDB.StateMachine ( import Codec.Serialise (Serialise) import Control.Monad (replicateM, void) +import Control.Monad.Base import Control.ResourceRegistry import Control.Tracer as CT import Data.Bifoldable @@ -102,6 +103,7 @@ import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Protocol.Abstract import Ouroboros.Consensus.Storage.ChainDB hiding (TraceFollowerEvent (..)) @@ -112,8 +114,8 @@ import Ouroboros.Consensus.Storage.Common (SizeInBytes) import qualified Ouroboros.Consensus.Storage.ImmutableDB as ImmutableDB import 
Ouroboros.Consensus.Storage.ImmutableDB.Chunks.Internal (unsafeChunkNoToEpochNo) -import Ouroboros.Consensus.Storage.LedgerDB (LedgerDB) -import qualified Ouroboros.Consensus.Storage.LedgerDB as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.Impl.Common as LedgerDB +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog as DbChangelog import qualified Ouroboros.Consensus.Storage.VolatileDB as VolatileDB import Ouroboros.Consensus.Util (split) import Ouroboros.Consensus.Util.CallStack @@ -164,7 +166,8 @@ data Cmd blk it flr -- ^ Advance the current slot to the block's slot (unless smaller than the -- current slot), add the block and run chain selection. | GetCurrentChain - | GetLedgerDB + -- TODO(js_ldb): reenable + -- GetLedgerDB | GetTipBlock | GetTipHeader | GetTipPoint @@ -247,7 +250,7 @@ deriving instance SOP.HasDatatypeInfo (Cmd blk it flr) data Success blk it flr = Unit () | Chain (AnchoredFragment (Header blk)) - | LedgerDB (LedgerDB (ExtLedgerState blk)) + | LedgerDB (DbChangelog.DbChangelog' blk) | MbBlock (Maybe blk) | MbAllComponents (Maybe (AllComponents blk)) | MbGCedAllComponents (MaybeGCedBlock (AllComponents blk)) @@ -297,23 +300,25 @@ type AllComponents blk = ) type TestConstraints blk = - ( ConsensusProtocol (BlockProtocol blk) - , LedgerSupportsProtocol blk - , BlockSupportsDiffusionPipelining blk - , InspectLedger blk - , Eq (ChainDepState (BlockProtocol blk)) - , Eq (LedgerState blk) - , Eq blk - , Show blk - , HasHeader blk - , StandardHash blk - , Serialise blk - , ModelSupportsBlock blk - , Eq (Header blk) - , Show (Header blk) - , ConvertRawHash blk - , HasHardForkHistory blk - , SerialiseDiskConstraints blk + ( ConsensusProtocol (BlockProtocol blk) + , LedgerSupportsProtocol blk + , BlockSupportsDiffusionPipelining blk + , InspectLedger blk + , Eq (ChainDepState (BlockProtocol blk)) + , Eq (LedgerState blk EmptyMK) + , Eq blk + , Show blk + , HasHeader blk + , StandardHash blk + , Serialise blk + , ModelSupportsBlock blk + , Eq (Header blk) + , Show (Header blk) + , ConvertRawHash blk + , HasHardForkHistory blk + , SerialiseDiskConstraints blk + , Show (LedgerState blk EmptyMK) + , LedgerTablesAreTrivial (LedgerState blk) ) deriving instance (TestConstraints blk, Eq it, Eq flr) @@ -351,7 +356,7 @@ data ChainDBEnv m blk = ChainDBEnv { } open :: - (IOLike m, TestConstraints blk) + (IOLike m, TestConstraints blk, MonadBase m m) => ChainDbArgs Identity m blk -> m (ChainDBState m blk) open args = do (chainDB, internal) <- openDBInternal args False @@ -361,7 +366,7 @@ open args = do -- PRECONDITION: the ChainDB is closed reopen :: - (IOLike m, TestConstraints blk) + (IOLike m, TestConstraints blk, MonadBase m m) => ChainDBEnv m blk -> m () reopen ChainDBEnv { varDB, args } = do chainDBState <- open args @@ -373,7 +378,7 @@ close ChainDBState { chainDB, addBlockAsync } = do closeDB chainDB run :: forall m blk. - (IOLike m, TestConstraints blk) + (IOLike m, TestConstraints blk, MonadBase m m) => ChainDBEnv m blk -> Cmd blk (TestIterator m blk) (TestFollower m blk) -> m (Success blk (TestIterator m blk) (TestFollower m blk)) @@ -381,7 +386,7 @@ run env@ChainDBEnv { varDB, .. } cmd = readTVarIO varDB >>= \st@ChainDBState { chainDB = ChainDB{..}, internal } -> case cmd of AddBlock blk -> Point <$> (advanceAndAdd st (blockSlot blk) blk) GetCurrentChain -> Chain <$> atomically getCurrentChain - GetLedgerDB -> LedgerDB <$> atomically getLedgerDB + -- GetLedgerDB -> LedgerDB . 
flush <$> atomically getDbChangelog -- TODO(jdral_ldb) GetTipBlock -> MbBlock <$> getTipBlock GetTipHeader -> MbHeader <$> getTipHeader GetTipPoint -> Point <$> atomically getTipPoint @@ -402,7 +407,7 @@ run env@ChainDBEnv { varDB, .. } cmd = Reopen -> Unit <$> reopen env PersistBlks -> ignore <$> persistBlks DoNotGarbageCollect internal PersistBlksThenGC -> ignore <$> persistBlks GarbageCollect internal - UpdateLedgerSnapshots -> ignore <$> intUpdateLedgerSnapshots internal + UpdateLedgerSnapshots -> ignore <$> intTryTakeSnapshot internal WipeVolatileDB -> Point <$> wipeVolatileDB st where mbGCedAllComponents = MbGCedAllComponents . MaybeGCedBlock True @@ -446,6 +451,33 @@ run env@ChainDBEnv { varDB, .. } cmd = giveWithEq a = fmap (`WithEq` a) $ atomically $ stateTVar varNextId $ \i -> (i, succ i) +-- | When the model is asked for the ledger DB, it reconstructs it by applying +-- the blocks in the current chain, starting from the initial ledger state. +-- Before the introduction of UTxO HD, this approach resulted in a ledger DB +-- equivalent to the one maintained by the SUT. However, after UTxO HD, this is +-- no longer the case since the ledger DB can be altered as a result of taking +-- snapshots or opening the ledger DB (for instance when we process the +-- 'WipeVolatileDB' command). Taking snapshots or opening the ledger DB causes +-- the ledger DB to be flushed, which modifies its sequence of volatile and +-- immutable states. +-- +-- The model does not have information about when the flushes occur and it +-- cannot infer that information in a reliable way since this depends on the low +-- level details of operations such as opening the ledger DB. Therefore, we +-- assume that the 'GetLedgerDB' command should return a flushed ledger DB, and +-- we use this function to implement such a command both in the SUT and in the +-- model. +-- +-- When we compare the SUT and model's ledger DBs, by flushing we are not +-- comparing the immutable parts of the SUT and model's ledger DBs. However, +-- this was already the case before the introduction of UTxO HD: if the +-- current chain contained more than K blocks, then the ledger states before the +-- immutable tip were not compared by the 'GetLedgerDB' command. +-- flush :: +-- (LedgerSupportsProtocol blk) +-- => DbChangelog.DbChangelog' blk -> DbChangelog.DbChangelog' blk +-- flush = snd . DbChangelog.splitForFlushing + persistBlks :: IOLike m => ShouldGarbageCollect -> ChainDB.Internal m blk -> m () persistBlks collectGarbage ChainDB.Internal{..} = do mSlotNo <- intCopyToImmutableDB @@ -611,7 +643,7 @@ runPure :: forall blk. runPure cfg = \case AddBlock blk -> ok Point $ update (add blk) GetCurrentChain -> ok Chain $ query (Model.volatileChain k getHeader) - GetLedgerDB -> ok LedgerDB $ query (Model.getLedgerDB cfg) +-- GetLedgerDB -> ok LedgerDB $ query (flush . Model.getDbChangelog cfg) GetTipBlock -> ok MbBlock $ query Model.tipBlock GetTipHeader -> ok MbHeader $ query (fmap getHeader . 
Model.tipBlock) GetTipPoint -> ok Point $ query Model.tipPoint @@ -741,7 +773,7 @@ deriving instance (TestConstraints blk, Show1 r) => Show (Model blk m r) initModel :: HasHeader blk => LoE () -> TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk EmptyMK -> Model blk m r initModel loe cfg initLedger = Model { dbModel = Model.empty loe initLedger @@ -869,7 +901,7 @@ generator :: generator loe genBlock m@Model {..} = At <$> frequency [ (30, genAddBlock) , (if empty then 1 else 10, return GetCurrentChain) - , (if empty then 1 else 10, return GetLedgerDB) +-- , (if empty then 1 else 10, return GetLedgerDB) , (if empty then 1 else 10, return GetTipBlock) -- To check that we're on the right chain , (if empty then 1 else 10, return GetTipPoint) @@ -1165,7 +1197,7 @@ semantics :: forall blk. TestConstraints blk -> At Cmd blk IO Concrete -> IO (At Resp blk IO Concrete) semantics env (At cmd) = - At . (bimap (QSM.reference . QSM.Opaque) (QSM.reference . QSM.Opaque)) <$> + At . bimap (QSM.reference . QSM.Opaque) (QSM.reference . QSM.Opaque) <$> runIO env (bimap QSM.opaque QSM.opaque cmd) -- | The state machine proper @@ -1174,7 +1206,7 @@ sm :: TestConstraints blk -> ChainDBEnv IO blk -> BlockGen blk IO -> TopLevelConfig blk - -> ExtLedgerState blk + -> ExtLedgerState blk EmptyMK -> StateMachine (Model blk IO) (At Cmd blk IO) IO @@ -1207,7 +1239,7 @@ deriving instance ( ToExpr blk , ToExpr (HeaderHash blk) , ToExpr (ChainDepState (BlockProtocol blk)) , ToExpr (TipInfo blk) - , ToExpr (LedgerState blk) + , ToExpr (LedgerState blk EmptyMK) -- TODO why not mk? , ToExpr (ExtValidationError blk) ) => ToExpr (Model blk IO Concrete) @@ -1233,10 +1265,8 @@ deriving instance SOP.Generic (TraceGCEvent blk) deriving instance SOP.HasDatatypeInfo (TraceGCEvent blk) deriving instance SOP.Generic (TraceIteratorEvent blk) deriving instance SOP.HasDatatypeInfo (TraceIteratorEvent blk) -deriving instance SOP.Generic (LedgerDB.TraceSnapshotEvent blk) -deriving instance SOP.HasDatatypeInfo (LedgerDB.TraceSnapshotEvent blk) -deriving instance SOP.Generic (LedgerDB.TraceReplayEvent blk) -deriving instance SOP.HasDatatypeInfo (LedgerDB.TraceReplayEvent blk) +deriving instance SOP.Generic (LedgerDB.TraceLedgerDBEvent blk) +deriving instance SOP.HasDatatypeInfo (LedgerDB.TraceLedgerDBEvent blk) deriving instance SOP.Generic (ImmutableDB.TraceEvent blk) deriving instance SOP.HasDatatypeInfo (ImmutableDB.TraceEvent blk) deriving instance SOP.Generic (VolatileDB.TraceEvent blk) @@ -1494,7 +1524,7 @@ runCmdsLockstep loe (SmallChunkInfo chunkInfo) cmds = let args = mkArgs testCfg chunkInfo - testInitExtLedger + (testInitExtLedger `withLedgerTables` emptyLedgerTables) threadRegistry nodeDBs tracer @@ -1630,8 +1660,8 @@ traceEventName = \case TraceOpenEvent ev -> "Open." <> constrName ev TraceGCEvent ev -> "GC." <> constrName ev TraceIteratorEvent ev -> "Iterator." <> constrName ev - TraceSnapshotEvent ev -> "Ledger." <> constrName ev - TraceLedgerReplayEvent ev -> "LedgerReplay." <> constrName ev + TraceLedgerDBEvent ev -> "Ledger." <> constrName ev +-- TraceLedgerReplayEvent ev -> "LedgerReplay." <> constrName ev TraceImmutableDBEvent ev -> "ImmutableDB." <> constrName ev TraceVolatileDBEvent ev -> "VolatileDB." 
<> constrName ev TraceLastShutdownUnclean -> "LastShutdownUnclean" @@ -1639,7 +1669,7 @@ traceEventName = \case mkArgs :: IOLike m => TopLevelConfig Blk -> ImmutableDB.ChunkInfo - -> ExtLedgerState Blk + -> ExtLedgerState Blk ValuesMK -> ResourceRegistry m -> NodeDBs (StrictTMVar m MockFS) -> CT.Tracer m (TraceEvent Blk) diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Unit.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Unit.hs index d82b1c58d0..f92e24cab3 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Unit.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/ChainDB/Unit.hs @@ -16,6 +16,7 @@ module Test.Ouroboros.Storage.ChainDB.Unit (tests) where import Cardano.Slotting.Slot (WithOrigin (..)) import Control.Monad (replicateM, unless, void) +import Control.Monad.Base (MonadBase) import Control.Monad.Except (Except, ExceptT, MonadError, runExcept, runExceptT, throwError) import Control.Monad.Reader (MonadReader, ReaderT, ask, runReaderT) @@ -27,6 +28,7 @@ import Ouroboros.Consensus.Block.RealPoint (pointToWithOriginRealPoint) import Ouroboros.Consensus.Config (TopLevelConfig, configSecurityParam) +import Ouroboros.Consensus.Ledger.Basics import Ouroboros.Consensus.Ledger.Extended (ExtLedgerState) import Ouroboros.Consensus.Ledger.SupportsProtocol (LedgerSupportsProtocol) @@ -34,7 +36,6 @@ import qualified Ouroboros.Consensus.Storage.ChainDB.API as API import qualified Ouroboros.Consensus.Storage.ChainDB.API.Types.InvalidBlockPunishment as API import Ouroboros.Consensus.Storage.ChainDB.Impl (TraceEvent) import Ouroboros.Consensus.Storage.ChainDB.Impl.Args -import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.Args as ChainDB import Ouroboros.Consensus.Storage.Common (StreamFrom (..), StreamTo (..)) import Ouroboros.Consensus.Storage.ImmutableDB.Chunks as ImmutableDB @@ -231,7 +232,7 @@ runSystemIO expr = runSystem withChainDbEnv expr >>= toAssertion where chunkInfo = ImmutableDB.simpleChunkInfo 100 topLevelConfig = mkTestCfg chunkInfo - withChainDbEnv = withTestChainDbEnv topLevelConfig chunkInfo testInitExtLedger + withChainDbEnv = withTestChainDbEnv topLevelConfig chunkInfo $ convertMapKind testInitExtLedger newtype TestFailure = TestFailure String deriving (Show) @@ -328,7 +329,7 @@ withModelContext f = do pure a -instance (Model.ModelSupportsBlock blk, LedgerSupportsProtocol blk) +instance (Model.ModelSupportsBlock blk, LedgerSupportsProtocol blk, LedgerTablesAreTrivial (LedgerState blk)) => SupportsUnitTest (ModelM blk) where type FollowerId (ModelM blk) = Model.FollowerId @@ -391,10 +392,10 @@ runSystem withChainDbEnv expr -- | Provide a standard ChainDbEnv for testing. withTestChainDbEnv :: - (IOLike m, TestConstraints blk) + (IOLike m, TestConstraints blk, MonadBase m m) => TopLevelConfig blk -> ImmutableDB.ChunkInfo - -> ExtLedgerState blk + -> ExtLedgerState blk ValuesMK -> (ChainDBEnv m blk -> m [TraceEvent blk] -> m a) -> m a withTestChainDbEnv topLevelConfig chunkInfo extLedgerState cont @@ -424,7 +425,7 @@ withTestChainDbEnv topLevelConfig chunkInfo extLedgerState cont closeChainDbEnv (env, _) = do readTVarIO (varDB env) >>= close closeRegistry (registry env) - closeRegistry (cdbsRegistry $ cdbsArgs $ args env) + closeRegistry (cdbsRegistry . 
cdbsArgs $ args env) chainDbArgs registry nodeDbs tracer = let args = fromMinimalChainDbArgs MinimalChainDbArgs @@ -434,8 +435,7 @@ withTestChainDbEnv topLevelConfig chunkInfo extLedgerState cont , mcdbRegistry = registry , mcdbNodeDBs = nodeDbs } - in ChainDB.updateTracer tracer args - + in updateTracer tracer args instance IOLike m => SupportsUnitTest (SystemM blk m) where diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB.hs index 9336e09cd7..5337e77694 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB.hs @@ -6,14 +6,22 @@ -- module Test.Ouroboros.Storage.LedgerDB (tests) where -import qualified Test.Ouroboros.Storage.LedgerDB.DiskPolicy as DiskPolicy -import qualified Test.Ouroboros.Storage.LedgerDB.InMemory as InMemory -import qualified Test.Ouroboros.Storage.LedgerDB.OnDisk as OnDisk -import Test.Tasty +import qualified Test.Ouroboros.Storage.LedgerDB.Serialisation as Serialisation +import qualified Test.Ouroboros.Storage.LedgerDB.SnapshotPolicy as SnapshotPolicy +import qualified Test.Ouroboros.Storage.LedgerDB.StateMachine as StateMachine +import qualified Test.Ouroboros.Storage.LedgerDB.V1.BackingStore as BackingStore +import qualified Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.QuickCheck as DbChangelog.QuickCheck +import qualified Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.Unit as DbChangelog.Unit +import Test.Tasty (TestTree, testGroup) tests :: TestTree tests = testGroup "LedgerDB" [ - InMemory.tests - , OnDisk.tests - , DiskPolicy.tests + testGroup "V1" [ + BackingStore.tests + , DbChangelog.Unit.tests + , DbChangelog.QuickCheck.tests + ] + , SnapshotPolicy.tests + , Serialisation.tests + , StateMachine.tests ] diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/OrphanArbitrary.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/OrphanArbitrary.hs deleted file mode 100644 index a515ab81db..0000000000 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/OrphanArbitrary.hs +++ /dev/null @@ -1,14 +0,0 @@ -{-# OPTIONS_GHC -Wno-orphans #-} - -module Test.Ouroboros.Storage.LedgerDB.OrphanArbitrary () where - -import Ouroboros.Consensus.Config.SecurityParam (SecurityParam (..)) -import Test.QuickCheck - -{------------------------------------------------------------------------------- - Orphan Arbitrary instances --------------------------------------------------------------------------------} - -instance Arbitrary SecurityParam where - arbitrary = SecurityParam <$> choose (0, 6) - shrink (SecurityParam k) = SecurityParam <$> shrink k diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/Serialisation.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/Serialisation.hs new file mode 100644 index 0000000000..08405f0305 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/Serialisation.hs @@ -0,0 +1,72 @@ +{-# LANGUAGE TypeApplications #-} + +module Test.Ouroboros.Storage.LedgerDB.Serialisation (tests) where + +import Codec.CBOR.FlatTerm (FlatTerm, TermToken (..), fromFlatTerm, + toFlatTerm) +import Codec.Serialise (decode, encode) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Test.Tasty +import Test.Tasty.HUnit +import 
Test.Util.Orphans.Arbitrary () +import Test.Util.TestBlock + +tests :: TestTree +tests = testGroup "Serialisation" [ + testCase "encode" test_encode_ledger + , testCase "decode" test_decode_ledger + , testCase "decode ChainSummary" test_decode_ChainSummary + ] + +{------------------------------------------------------------------------------- + Serialisation +-------------------------------------------------------------------------------} + +-- | The LedgerDB is parametric in the ledger @l@. We use @Int@ for simplicity. +example_ledger :: Int +example_ledger = 100 + +golden_ledger :: FlatTerm +golden_ledger = + [ TkListLen 2 + -- VersionNumber + , TkInt 1 + -- ledger: Int + , TkInt 100 + ] + +-- | The old format based on the @ChainSummary@. To remain backwards compatible +-- we still accept this old format. +golden_ChainSummary :: FlatTerm +golden_ChainSummary = + [ TkListLen 3 + -- tip: WithOrigin (RealPoint TestBlock) + , TkListLen 1 + , TkListLen 2 + , TkInt 3 + , TkListBegin, TkInt 0, TkInt 0, TkBreak + -- chain length: Word64 + , TkInt 10 + -- ledger: Int for simplicity + , TkInt 100 + ] + +test_encode_ledger :: Assertion +test_encode_ledger = + toFlatTerm (enc example_ledger) @?= golden_ledger + where + enc = encodeL encode + +test_decode_ledger :: Assertion +test_decode_ledger = + fromFlatTerm dec golden_ledger @?= Right example_ledger + where + dec = decodeLBackwardsCompatible (Proxy @TestBlock) decode decode + +-- | For backwards compatibility +test_decode_ChainSummary :: Assertion +test_decode_ChainSummary = + fromFlatTerm dec golden_ChainSummary @?= Right example_ledger + where + dec = decodeLBackwardsCompatible (Proxy @TestBlock) decode decode diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/DiskPolicy.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/SnapshotPolicy.hs similarity index 87% rename from ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/DiskPolicy.hs rename to ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/SnapshotPolicy.hs index 3c320ad973..8181a03851 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/DiskPolicy.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/SnapshotPolicy.hs @@ -3,25 +3,21 @@ {-# LANGUAGE NumericUnderscores #-} {-# LANGUAGE TypeApplications #-} -module Test.Ouroboros.Storage.LedgerDB.DiskPolicy (tests) where +module Test.Ouroboros.Storage.LedgerDB.SnapshotPolicy (tests) where import Data.Time.Clock (DiffTime, diffTimeToPicoseconds, picosecondsToDiffTime, secondsToDiffTime) import Data.Word import Ouroboros.Consensus.Config.SecurityParam (SecurityParam (..)) -import Ouroboros.Consensus.Storage.LedgerDB (DiskPolicy (..), - NumOfDiskSnapshots (..), SnapshotInterval (..), - TimeSinceLast (..), mkDiskPolicy) -import Ouroboros.Consensus.Storage.LedgerDB.DiskPolicy - (DiskPolicyArgs (DiskPolicyArgs)) +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots import Test.QuickCheck import Test.Tasty import Test.Tasty.QuickCheck tests :: TestTree tests = - testGroup "DiskPolicy" [ - testGroup "defaultDiskPolicy" [ + testGroup "SnapshotPolicy" [ + testGroup "defaultSnapshotPolicy" [ testProperty "onDiskNumSnapshots" prop_onDiskNumSnapshots , testProperty "onDiskShouldTakeSnapshot" prop_onDiskShouldTakeSnapshot ] @@ -35,26 +31,27 @@ tests = data TestSetup = TestSetup { -- | argument to 'onDiskShouldTakeSnapshot' tsBlocksSince :: Word64 - -- | argument to 'defaultDiskPolicy' + -- | argument to 
'defaultSnapshotPolicy' , tsK :: SecurityParam - -- | argument to 'defaultDiskPolicy' + -- | argument to 'defaultSnapshotPolicy' , tsSnapshotInterval :: SnapshotInterval -- | argument to 'onDiskShouldTakeSnapshot' - , tsTimeSince :: TimeSinceLast DiffTime + , tsTimeSince :: Maybe DiffTime } deriving (Show) --- | The represented default 'DiskPolicy' -toDiskPolicy :: TestSetup -> DiskPolicy -toDiskPolicy ts = mkDiskPolicy (tsK ts) diskPolicyArgs + +-- | The represented default 'SnapshotPolicy' +toSnapshotPolicy :: TestSetup -> SnapshotPolicy +toSnapshotPolicy ts = defaultSnapshotPolicy (tsK ts) snapshotPolicyArgs where - diskPolicyArgs = - DiskPolicyArgs (tsSnapshotInterval ts) DefaultNumOfDiskSnapshots + snapshotPolicyArgs = + SnapshotPolicyArgs (tsSnapshotInterval ts) DefaultNumOfDiskSnapshots -- | The result of the represented call to 'onDiskShouldTakeSnapshot' shouldTakeSnapshot :: TestSetup -> Bool shouldTakeSnapshot ts = onDiskShouldTakeSnapshot - (toDiskPolicy ts) + (toSnapshotPolicy ts) (tsTimeSince ts) (tsBlocksSince ts) @@ -121,7 +118,7 @@ instance Arbitrary TestSetup where tsBlocksSince = b , tsK = SecurityParam k , tsSnapshotInterval - , tsTimeSince = maybe NoSnapshotTakenYet TimeSinceLast t + , tsTimeSince = t } where -- 100 years seems a reasonable upper bound for consideration @@ -159,10 +156,11 @@ instance Arbitrary TestSetup where . diffTimeToPicoseconds shrinkTSL shnk = \case - NoSnapshotTakenYet -> [] - TimeSinceLast d -> NoSnapshotTakenYet : fmap TimeSinceLast (shnk d) + Nothing -> [] + Just d -> Nothing : fmap Just (shnk d) shrinkSnapshotInterval = \case + DisableSnapshots -> [] DefaultSnapshotInterval -> [] RequestedSnapshotInterval d -> DefaultSnapshotInterval @@ -172,12 +170,12 @@ instance Arbitrary TestSetup where Properties -------------------------------------------------------------------------------} --- | Check 'onDiskNumSnapshots' of 'defaultDiskPolicy' +-- | Check 'onDiskNumSnapshots' of 'defaultSnapshotPolicy' prop_onDiskNumSnapshots :: TestSetup -> Property prop_onDiskNumSnapshots ts = -- 'TestSetup' has more information than we need for this property counterexample "should always be 2" - $ onDiskNumSnapshots (toDiskPolicy ts) === 2 + $ onDiskNumSnapshots (toSnapshotPolicy ts) === 2 minBlocksBeforeSnapshot :: Word64 minBlocksBeforeSnapshot = 50_000 @@ -185,16 +183,16 @@ minBlocksBeforeSnapshot = 50_000 minSecondsBeforeSnapshot :: Integer minSecondsBeforeSnapshot = 6 * 60 --- | Check 'onDiskShouldTakeSnapshot' of 'defaultDiskPolicy' +-- | Check 'onDiskShouldTakeSnapshot' of 'defaultSnapshotPolicy' prop_onDiskShouldTakeSnapshot :: TestSetup -> Property prop_onDiskShouldTakeSnapshot ts = counterexample ("decided to take snapshot? 
" ++ show (shouldTakeSnapshot ts)) $ case t of - NoSnapshotTakenYet -> + Nothing -> counterexample "haven't taken a snapshot yet" $ counterexample "should take snapshot if it processed at least k blocks" $ shouldTakeSnapshot ts === (blocksSinceLast >= k) - TimeSinceLast timeSinceLast -> + Just timeSinceLast -> counterexample "have previously taken a snapshot" $ isDisjunctionOf (shouldTakeSnapshot ts `named` "the decision") [ systemChecksHowMuchTimeHasPassed timeSinceLast @@ -223,6 +221,8 @@ prop_onDiskShouldTakeSnapshot ts = (timeSinceLast >= interval) `named` "time since last is greater then explicitly requested interval" + DisableSnapshots -> error "Will never call this test with this value" + systemChecksHowManyBlocksWereProcessed :: DiffTime -> NamedValue Bool systemChecksHowManyBlocksWereProcessed timeSinceLast = disjunct `named` msg diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine.hs new file mode 100644 index 0000000000..1f26c8b8de --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine.hs @@ -0,0 +1,537 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE ExistentialQuantification #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE PolyKinds #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} +#if __GLASGOW_HASKELL__ >= 908 +{-# OPTIONS_GHC -Wno-x-partial #-} +#endif + +-- | On-disk ledger DB tests. +-- +-- This is a state-machine based test. The commands here are +-- +-- * Get the current volatile and immutable tip +-- * Switch to a fork (possibly rolling back 0 blocks, so equivalent to a push) +-- * Write a snapshot to disk +-- * Restore the ledger DB from the snapshots on disk +-- * Model disk corruption (truncate or delete latest snapshot) +-- +-- The model here is satisfyingly simple: just a map from blocks to their +-- corresponding ledger state modelling the whole block chain since genesis. 
+module Test.Ouroboros.Storage.LedgerDB.StateMachine (tests) where + +import Control.Monad.Except +import Control.Monad.State hiding (state) +import Control.ResourceRegistry +import Control.Tracer (nullTracer) +import qualified Data.List as L +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import qualified Data.SOP.Dict as Dict +import Data.Word +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.Ledger.Abstract +import Ouroboros.Consensus.Ledger.Extended +import Ouroboros.Consensus.Ledger.Tables.Utils +import qualified Ouroboros.Consensus.Storage.ChainDB.Impl.BlockCache as BlockCache +import Ouroboros.Consensus.Storage.ImmutableDB.Impl.Stream +import Ouroboros.Consensus.Storage.LedgerDB.API as LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Args as Args +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Init +import Ouroboros.Consensus.Storage.LedgerDB.Impl.Snapshots +import Ouroboros.Consensus.Storage.LedgerDB.V1.Args +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.API +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB +import Ouroboros.Consensus.Storage.LedgerDB.V1.Init as V1 +import Ouroboros.Consensus.Storage.LedgerDB.V2.Args +import Ouroboros.Consensus.Storage.LedgerDB.V2.Init as V2 +import Ouroboros.Consensus.Util hiding (Some) +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.IOLike +import qualified Ouroboros.Network.AnchoredSeq as AS +import qualified System.Directory as Dir +import System.FS.API +import qualified System.FS.IO as FSIO +import qualified System.FS.Sim.MockFS as MockFS +import System.FS.Sim.STM +import qualified System.IO.Temp as Temp +import Test.Ouroboros.Storage.LedgerDB.StateMachine.TestBlock +import qualified Test.QuickCheck as QC +import "quickcheck-dynamic" Test.QuickCheck.Extras +import qualified Test.QuickCheck.Monadic as QC +import Test.QuickCheck.StateModel +import Test.Tasty +import Test.Tasty.QuickCheck +import Test.Util.TestBlock hiding (TestBlock, TestBlockCodecConfig, + TestBlockStorageConfig) + +tests :: TestTree +tests = testGroup "StateMachine" [ + testProperty "InMemV1" $ + prop_sequential 100000 inMemV1TestArguments simulatedFS + , testProperty "InMemV2" $ + prop_sequential 100000 inMemV2TestArguments simulatedFS + , testProperty "LMDB" $ + prop_sequential 1000 lmdbTestArguments realFS + ] + +prop_sequential :: + Int + -> (SecurityParam -> SomeHasFS IO -> TestArguments IO) + -> IO (SomeHasFS IO, IO ()) + -> Actions Model + -> QC.Property +prop_sequential maxSuccess mkTestArguments fsOps as = QC.withMaxSuccess maxSuccess $ + QC.monadicIO $ do + ref <- lift $ initialEnvironment fsOps mkTestArguments =<< initChainDB + (_, Environment _ testInternals _ _ _ clean) <- runPropertyStateT (runActions as) ref + QC.run $ closeLedgerDB testInternals >> clean + QC.assert True + +-- | The initial environment is mostly undefined because it will be initialized +-- by the @Init@ command. We are forced to provide this dummy implementation +-- because some parts of it are static (which we can provide now) and also the +-- empty sequence of commands must still run the cleanup functions, which here +-- are trivial, but nevertheless they have to exist. 
+initialEnvironment :: + IO (SomeHasFS IO, IO ()) + -> (SecurityParam -> SomeHasFS IO -> TestArguments IO) + -> ChainDB IO + -> IO Environment +initialEnvironment fsOps mkTestArguments cdb = do + (sfs, cleanupFS) <- fsOps + pure $ Environment + undefined + (TestInternals undefined undefined undefined undefined (pure ())) + cdb + (flip mkTestArguments sfs) + sfs + cleanupFS + +{------------------------------------------------------------------------------- + Arguments +-------------------------------------------------------------------------------} + +data TestArguments m = TestArguments { + argFlavorArgs :: !(Complete Args.LedgerDbFlavorArgs m) + , argLedgerDbCfg :: !(LedgerDbCfg (ExtLedgerState TestBlock)) + } + +simulatedFS :: IO (SomeHasFS IO, IO ()) +simulatedFS = do + fs <- simHasFS' MockFS.empty + pure (SomeHasFS fs , pure ()) + +realFS :: IO (SomeHasFS IO, IO ()) +realFS = liftIO $ do + systmpdir <- Dir.getTemporaryDirectory + tmpdir <- Temp.createTempDirectory systmpdir "init_standalone_db" + pure (SomeHasFS $ FSIO.ioHasFS $ MountPoint tmpdir, Dir.removeDirectoryRecursive tmpdir) + +inMemV1TestArguments :: + SecurityParam + -> SomeHasFS IO + -> TestArguments IO +inMemV1TestArguments secParam _ = + TestArguments { + argFlavorArgs = LedgerDbFlavorArgsV1 $ V1Args DisableFlushing DisableQuerySize InMemoryBackingStoreArgs + , argLedgerDbCfg = extLedgerDbConfig secParam + } + +inMemV2TestArguments :: + SecurityParam + -> SomeHasFS IO + -> TestArguments IO +inMemV2TestArguments secParam _ = + TestArguments { + argFlavorArgs = LedgerDbFlavorArgsV2 $ V2Args InMemoryHandleArgs + , argLedgerDbCfg = extLedgerDbConfig secParam + } + +testLMDBLimits :: LMDBLimits +testLMDBLimits = LMDBLimits + { -- 100 MiB should be more than sufficient for the tests we're running here. + -- If the database were to grow beyond 100 Mebibytes, resulting in a test + -- error, then something in the LMDB backing store or tests has changed and + -- we should reconsider this value. + lmdbMapSize = 100 * 1024 * 1024 + -- 3 internal databases: 1 for the settings, 1 for the state, and 1 for the + -- ledger tables. + , lmdbMaxDatabases = 3 + , lmdbMaxReaders = 16 + } + +lmdbTestArguments :: + SecurityParam + -> SomeHasFS IO + -> TestArguments IO +lmdbTestArguments secParam fs = + TestArguments { + argFlavorArgs = LedgerDbFlavorArgsV1 $ V1Args DisableFlushing DisableQuerySize $ LMDBBackingStoreArgs (LiveLMDBFS fs) testLMDBLimits Dict.Dict + , argLedgerDbCfg = extLedgerDbConfig secParam + } + +{------------------------------------------------------------------------------- + Model +-------------------------------------------------------------------------------} + +type TheBlockChain = + AS.AnchoredSeq + (WithOrigin SlotNo) + (ExtLedgerState TestBlock ValuesMK) + (TestBlock, ExtLedgerState TestBlock ValuesMK) + +data Model = + UnInit + | Model + TheBlockChain + SecurityParam + deriving (Generic, Show) + +instance AS.Anchorable + (WithOrigin SlotNo) + (ExtLedgerState TestBlock ValuesMK) + (TestBlock, ExtLedgerState TestBlock ValuesMK) where + asAnchor = snd + getAnchorMeasure _ = getTipSlot + +instance HasVariables TheBlockChain where + getAllVariables _ = mempty + +modelUpdateLedger :: + StateT + TheBlockChain + (Except (ExtValidationError TestBlock)) a + -> Model + -> Model +modelUpdateLedger f model@(Model chain secParam) = + case runExcept (runStateT f chain) of + Left{} -> model + Right (_, ledger') -> Model ledger' secParam +modelUpdateLedger _ _ = error "Uninitialized model tried to apply blocks!" 
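+
+-- For intuition, pushing a single block onto the model chain amounts to ticking
+-- and applying it on top of the current tip and appending the result; a minimal
+-- sketch (a hypothetical helper mirroring the 'push' function used in
+-- 'nextState' below):
+--
+-- > modelPush :: SecurityParam -> TestBlock -> Model -> Model
+-- > modelPush secParam b = modelUpdateLedger $ do
+-- >   chain <- get
+-- >   let tip = either id snd $ AS.head chain
+-- >   l' <- lift $ tickThenApply (ledgerDbCfg $ extLedgerDbConfig secParam) b tip
+-- >   put (chain AS.:> (b, applyDiffs tip l'))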
+ +modelRollback :: Word64 -> Model -> Model +modelRollback n (Model chain secParam) = + Model (AS.dropNewest (fromIntegral n) chain) secParam +modelRollback _ UnInit = error "Uninitialized model can't rollback!" + +{------------------------------------------------------------------------------- + StateModel +-------------------------------------------------------------------------------} + +deriving instance Show (Action Model a) +deriving instance Eq (Action Model a) + +instance HasVariables (Action Model a) where + getAllVariables _ = mempty + +instance StateModel Model where + data Action Model a where + WipeLedgerDB :: Action Model () + TruncateSnapshots :: Action Model () + DropAndRestore :: Word64 -> Action Model () + ForceTakeSnapshot :: Action Model () + GetState :: Action Model (ExtLedgerState TestBlock EmptyMK, ExtLedgerState TestBlock EmptyMK) + Init :: SecurityParam -> Action Model () + ValidateAndCommit :: Word64 -> [TestBlock] -> Action Model () + + actionName WipeLedgerDB{} = "WipeLedgerDB" + actionName TruncateSnapshots{} = "TruncateSnapshots" + actionName DropAndRestore{} = "DropAndRestore" + actionName ForceTakeSnapshot = "TakeSnapshot" + actionName GetState{} = "GetState" + actionName Init{} = "Init" + actionName ValidateAndCommit{} = "ValidateAndCommit" + + arbitraryAction _ UnInit = Some . Init <$> QC.arbitrary + arbitraryAction _ model@(Model chain secParam) = + frequency $ [ (2, pure $ Some GetState) + , (2, pure $ Some ForceTakeSnapshot) + , (1, Some . DropAndRestore <$> QC.choose (0, fromIntegral $ AS.length chain)) + , (4, Some <$> do + let maxRollback = minimum [ + fromIntegral . AS.length $ chain + , maxRollbacks secParam + ] + numRollback <- QC.choose (0, maxRollback) + numNewBlocks <- QC.choose (numRollback, numRollback + 2) + let + chain' = case modelRollback numRollback model of + UnInit -> error "Impossible" + Model ch _ -> ch + blocks = genBlocks + numNewBlocks + (lastAppliedPoint . ledgerState . either id snd . 
AS.head $ chain') + return $ ValidateAndCommit numRollback blocks) + , (1, pure $ Some WipeLedgerDB) + , (1, pure $ Some TruncateSnapshots) + ] + + initialState = UnInit + + nextState _ (Init secParam) _var = Model (AS.Empty genesis) secParam + nextState state GetState _var = state + nextState state ForceTakeSnapshot _var = state + nextState state@(Model _ secParam) (ValidateAndCommit n blks) _var = + modelUpdateLedger switch state + where + push :: TestBlock -> StateT (AS.AnchoredSeq (WithOrigin SlotNo) (ExtLedgerState TestBlock ValuesMK) (TestBlock, ExtLedgerState TestBlock ValuesMK)) (Except (ExtValidationError TestBlock)) () + push b = do + ls <- get + let tip = either id snd $ AS.head ls + l' <- lift $ tickThenApply (ledgerDbCfg $ extLedgerDbConfig secParam) b tip + put (ls AS.:> (b, applyDiffs tip l')) + + switch :: StateT (AS.AnchoredSeq (WithOrigin SlotNo) (ExtLedgerState TestBlock ValuesMK) (TestBlock, ExtLedgerState TestBlock ValuesMK)) (Except (ExtValidationError TestBlock)) () + switch = do + modify $ AS.dropNewest (fromIntegral n) + mapM_ push blks + + nextState state WipeLedgerDB _var = state + nextState state TruncateSnapshots _var = state + nextState state (DropAndRestore n) _var = modelRollback n state + nextState UnInit _ _ = error "Uninitialized model created a command different than Init" + + precondition UnInit Init{} = True + precondition UnInit _ = False + precondition (Model chain secParam) (ValidateAndCommit n blks) = + n <= min (maxRollbacks secParam) (fromIntegral $ AS.length chain) + && case blks of + [] -> True + (b:_) -> tbSlot b == 1 + withOrigin 0 id (getTipSlot (AS.headAnchor chain)) + precondition _ Init{} = False + precondition _ _ = True + +{------------------------------------------------------------------------------- + Mocked ChainDB +-------------------------------------------------------------------------------} + +-- | Mocked chain db +data ChainDB m = ChainDB { + -- | Block storage + dbBlocks :: StrictTVar m (Map (RealPoint TestBlock) TestBlock) + + -- | Current chain and corresponding ledger state + -- + -- Invariant: all references @r@ here must be present in 'dbBlocks'. + , dbChain :: StrictTVar m [RealPoint TestBlock] + } + +initChainDB :: + forall m. (MonadIO m, IOLike m) + => m (ChainDB m) +initChainDB = do + dbBlocks <- uncheckedNewTVarM Map.empty + dbChain <- uncheckedNewTVarM [] + return $ ChainDB dbBlocks dbChain + +dbStreamAPI :: + forall m. IOLike m + => SecurityParam + -> ChainDB m + -> m (StreamAPI m TestBlock TestBlock, [TestBlock]) +dbStreamAPI secParam chainDb = + atomically $ do + points <- reverse . take (fromIntegral $ maxRollbacks secParam) <$> readTVar dbChain + blks <- readTVar dbBlocks + pure $ (StreamAPI streamAfter, map (blks Map.!) points) + where + ChainDB { + dbBlocks + , dbChain + } = chainDb + + streamAfter :: + Point TestBlock + -> (Either (RealPoint TestBlock) (m (NextItem TestBlock)) -> m a) + -> m a + streamAfter tip k = do + pts <- atomically $ reverse . drop (fromIntegral $ maxRollbacks secParam) <$> readTVar dbChain + case tip' of + NotOrigin pt + | pt `L.notElem` pts + -> k $ Left pt + _otherwise + -> do toStream <- uncheckedNewTVarM (blocksToStream tip' pts) + k (Right (getNext toStream)) + where + tip' = pointToWithOriginRealPoint tip + + -- Blocks to stream + -- + -- Precondition: tip must be on the current chain + blocksToStream :: + WithOrigin (RealPoint TestBlock) + -> [RealPoint TestBlock] -> [RealPoint TestBlock] + blocksToStream Origin = id + blocksToStream (NotOrigin r) = tail . 
dropWhile (/= r) + + getNext :: StrictTVar m [RealPoint TestBlock] -> m (NextItem TestBlock) + getNext toStream = do + mr <- atomically $ do + rs <- readTVar toStream + case rs of + [] -> return Nothing + r:rs' -> writeTVar toStream rs' >> return (Just r) + case mr of + Nothing -> return NoMoreItems + Just r -> do mb <- atomically $ Map.lookup r <$> readTVar dbBlocks + case mb of + Just b -> return $ NextItem b + Nothing -> error blockNotFound + +blockNotFound :: String +blockNotFound = concat [ + "dbStreamAPI: " + , "invariant violation: " + , "block in dbChain not present in dbBlocks" + ] + +{------------------------------------------------------------------------------- + New SUT +-------------------------------------------------------------------------------} + +openLedgerDB :: + Complete Args.LedgerDbFlavorArgs IO + -> ChainDB IO + -> LedgerDbCfg (ExtLedgerState TestBlock) + -> SomeHasFS IO + -> IO (LedgerDB' IO TestBlock, TestInternals' IO TestBlock) +openLedgerDB flavArgs env cfg fs = do + (stream, volBlocks) <- dbStreamAPI (ledgerDbCfgSecParam cfg) env + let getBlock f = Map.findWithDefault (error blockNotFound) f <$> readTVarIO (dbBlocks env) + replayGoal <- fmap (realPointToPoint . last . Map.keys) . atomically $ readTVar (dbBlocks env) + rr <- unsafeNewRegistry + let args = LedgerDbArgs + (SnapshotPolicyArgs DisableSnapshots DefaultNumOfDiskSnapshots) + (pure genesis) + fs + cfg + nullTracer + flavArgs + rr + Nothing + (ldb, _, od) <- case flavArgs of + LedgerDbFlavorArgsV1 bss -> + let initDb = V1.mkInitDb + args + bss + getBlock + in + openDBInternal args initDb stream replayGoal + LedgerDbFlavorArgsV2 bss -> + let initDb = V2.mkInitDb + args + bss + getBlock + in + openDBInternal args initDb stream replayGoal + withRegistry $ \reg -> do + vr <- validate ldb reg (const $ pure ()) BlockCache.empty 0 (map getHeader volBlocks) + case vr of + ValidateSuccessful forker -> do + atomically (forkerCommit forker) + forkerClose forker + _ -> error "Couldn't restart the chain, failed to apply volatile blocks!" 
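+  -- At this point the volatile blocks returned by 'dbStreamAPI' have been
+  -- re-applied and committed through the forker, so the ledger DB's volatile
+  -- tip again matches the tip of the mocked 'ChainDB'.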
+ pure (ldb, od) + +{------------------------------------------------------------------------------- + RunModel +-------------------------------------------------------------------------------} + +-- | The environment for the monad in which we will run the test +data Environment = + Environment + (LedgerDB' IO TestBlock) + (TestInternals' IO TestBlock) + (ChainDB IO) + (SecurityParam -> TestArguments IO) + (SomeHasFS IO) + (IO ()) + +instance RunModel Model (StateT Environment IO) where + + perform _ (Init secParam) _ = do + Environment _ _ chainDb mkArgs fs cleanup <- get + (ldb, testInternals) <- lift $ do + let args = mkArgs secParam + openLedgerDB (argFlavorArgs args) chainDb (argLedgerDbCfg args) fs + put (Environment ldb testInternals chainDb mkArgs fs cleanup) + + perform _ WipeLedgerDB _ = do + Environment _ testInternals _ _ _ _ <- get + lift $ wipeLedgerDB testInternals + + perform _ GetState _ = do + Environment ldb _ _ _ _ _ <- get + lift $ atomically $ (,) <$> getImmutableTip ldb <*> getVolatileTip ldb + + perform _ ForceTakeSnapshot _ = do + Environment _ testInternals _ _ _ _ <- get + lift $ takeSnapshotNOW testInternals Nothing + + perform _ (ValidateAndCommit n blks) _ = do + Environment ldb _ chainDb _ _ _ <- get + lift $ do + atomically $ modifyTVar (dbBlocks chainDb) $ + repeatedly (uncurry Map.insert) (map (\b -> (blockRealPoint b, b)) blks) + withRegistry $ \rr -> do + vr <- validate ldb rr (const $ pure ()) BlockCache.empty n (map getHeader blks) + case vr of + ValidateSuccessful forker -> do + atomically $ modifyTVar (dbChain chainDb) (reverse (map blockRealPoint blks) ++) + atomically (forkerCommit forker) + forkerClose forker + ValidateExceededRollBack{} -> error "Unexpected Rollback" + ValidateLedgerError (AnnLedgerError forker _ _) -> forkerClose forker >> error "Unexpected ledger error" + + perform state@(Model _ secParam) (DropAndRestore n) lk = do + Environment _ testInternals chainDb _ _ _ <- get + lift $ do + atomically $ modifyTVar (dbChain chainDb) (drop (fromIntegral n)) + closeLedgerDB testInternals + perform state (Init secParam) lk + + perform _ TruncateSnapshots _ = do + Environment _ testInternals _ _ _ _ <- get + lift $ truncateSnapshots testInternals + + perform UnInit _ _ = error "Uninitialized model created a command different than Init" + + + -- NOTE + -- + -- In terms of postcondition, we only need to check that the immutable and + -- volatile tip are the right ones. By the blocks validating one on top of + -- each other it already implies that having the right volatile tip means that + -- we have the right whole chain. + postcondition (Model chain secParam, _) GetState _ (imm, vol) = + let volSt = either forgetLedgerTables (forgetLedgerTables . snd) (AS.head chain) + immSt = either forgetLedgerTables (forgetLedgerTables . 
snd) (AS.head (AS.dropNewest (fromIntegral $ maxRollbacks secParam) chain)) + in do + counterexamplePost $ unlines [ "VolSt: ", show volSt + , "VolSut: ", show vol + , "ImmSt: ", show immSt + , "ImmSut: ", show imm + ] + pure $ volSt == vol && immSt == imm + postcondition _ _ _ _ = pure True diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine/TestBlock.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine/TestBlock.hs new file mode 100644 index 0000000000..0da95781f8 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/StateMachine/TestBlock.hs @@ -0,0 +1,351 @@ +{-# LANGUAGE CPP #-} +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE ExistentialQuantification #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GeneralizedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE RankNTypes #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} + +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Ouroboros.Storage.LedgerDB.StateMachine.TestBlock ( + TestBlock + , extLedgerDbConfig + , genBlocks + , genesis + ) where + +import Cardano.Binary (FromCBOR (..), ToCBOR (..)) +import qualified Cardano.Slotting.Slot as WithOrigin +import qualified Codec.CBOR.Decoding as CBOR +import qualified Codec.CBOR.Encoding as CBOR +import Codec.Serialise (Serialise) +import qualified Codec.Serialise as S +import Data.Foldable (toList) +import Data.List.NonEmpty (nonEmpty) +import qualified Data.Map.Diff.Strict.Internal as DS +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Maybe (fromJust) +import Data.Maybe.Strict +import Data.Set (Set) +import qualified Data.Set as Set +import Data.TreeDiff +import Data.Word +import GHC.Generics (Generic) +import Ouroboros.Consensus.Block +import Ouroboros.Consensus.Config +import Ouroboros.Consensus.HardFork.Abstract +import Ouroboros.Consensus.Ledger.Abstract hiding (Key, Value) +import qualified Ouroboros.Consensus.Ledger.Abstract as Ledger +import Ouroboros.Consensus.Ledger.Extended +import qualified Ouroboros.Consensus.Ledger.Tables.DiffSeq as DS +import Ouroboros.Consensus.Ledger.Tables.Utils +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Util.IOLike +import Ouroboros.Network.Block (Point (Point)) +import Ouroboros.Network.Point (Block (Block)) +import Prelude hiding (elem) +import qualified Test.QuickCheck as QC +import Test.Tasty.QuickCheck +import Test.Util.Orphans.Arbitrary () +import Test.Util.TestBlock hiding (TestBlock, TestBlockCodecConfig, + TestBlockStorageConfig) +import Test.Util.ToExpr () + +{------------------------------------------------------------------------------- + TestBlock +-------------------------------------------------------------------------------} + +type TestBlock = TestBlockWith Tx + +-- | Mock of a UTxO transaction where exactly one (transaction) input is +-- consumed and exactly one output is produced. +-- +data Tx = Tx { + -- | Input that the transaction consumes. + consumed :: Token + -- | Ouptupt that the transaction produces. 
+ , produced :: (Token, TValue) + } + deriving stock (Show, Eq, Ord, Generic) + deriving anyclass (Serialise, NoThunks, ToExpr) + +-- | A token is an identifier for the values produced and consumed by the +-- 'TestBlock' transactions. +-- +-- This is analogous to @TxId@: it's how we identify what's in the table. It's +-- also analogous to @TxIn@, since we trivially only have one output per 'Tx'. +newtype Token = Token { unToken :: Point TestBlock } + deriving stock (Show, Eq, Ord, Generic) + deriving newtype (Serialise, NoThunks, ToExpr, QC.Arbitrary) + +instance QC.Arbitrary (Point TestBlock) where + arbitrary = do + slot <- SlotNo <$> QC.arbitrary + hash <- TestHash . fromJust . nonEmpty . QC.getNonEmpty <$> QC.arbitrary + pure $ Point $ WithOrigin.At $ Block slot hash + +-- | Unit of value associated with the output produced by a transaction. +-- +-- This is analogous to @TxOut@: it's what the table maps 'Token's to. +newtype TValue = TValue (WithOrigin SlotNo) + deriving stock (Show, Eq, Ord, Generic) + deriving newtype (Serialise, NoThunks, ToExpr) + +{------------------------------------------------------------------------------- + A ledger semantics for TestBlock +-------------------------------------------------------------------------------} + +data TxErr + = TokenWasAlreadyCreated Token + | TokenDoesNotExist Token + deriving stock (Generic, Eq, Show) + deriving anyclass (NoThunks, Serialise, ToExpr) + +instance PayloadSemantics Tx where + data PayloadDependentState Tx mk = + UTxTok { utxtoktables :: LedgerTables (LedgerState TestBlock) mk + -- | All the tokens that ever existed. We use this to + -- make sure a token is not created more than once. See + -- the definition of 'applyPayload' in the + -- 'PayloadSemantics' of 'Tx'. + , utxhist :: Set Token + } + deriving stock (Generic) + + type PayloadDependentError Tx = TxErr + + -- We need to exercise the HD backend. 
This requires that we store key-values + -- ledger tables and the block application semantics satisfy: + -- + -- * a key is deleted at most once + -- * a key is inserted at most once + -- + applyPayload st Tx{consumed, produced} = + fmap track $ delete consumed st >>= uncurry insert produced + where + insert :: + Token + -> TValue + -> PayloadDependentState Tx ValuesMK + -> Either TxErr (PayloadDependentState Tx ValuesMK) + insert tok val st'@UTxTok{utxtoktables, utxhist} = + if tok `Set.member` utxhist + then Left $ TokenWasAlreadyCreated tok + else Right $ st' { utxtoktables = Map.insert tok val `onValues` utxtoktables + , utxhist = Set.insert tok utxhist + } + delete :: + Token + -> PayloadDependentState Tx ValuesMK + -> Either TxErr (PayloadDependentState Tx ValuesMK) + delete tok st'@UTxTok{utxtoktables} = + if Map.member tok `queryKeys` utxtoktables + then Right $ st' { utxtoktables = Map.delete tok `onValues` utxtoktables + } + else Left $ TokenDoesNotExist tok + + track :: PayloadDependentState Tx ValuesMK -> PayloadDependentState Tx TrackingMK + track stAfter = + stAfter { utxtoktables = + LedgerTables $ rawCalculateDifference utxtokBefore utxtokAfter + } + where + utxtokBefore = getLedgerTables $ utxtoktables st + utxtokAfter = getLedgerTables $ utxtoktables stAfter + + getPayloadKeySets Tx{consumed} = + LedgerTables $ KeysMK $ Set.singleton consumed + +deriving instance Eq (LedgerTables (LedgerState TestBlock) mk) => Eq (PayloadDependentState Tx mk) +deriving instance NoThunks (LedgerTables (LedgerState TestBlock) mk) => NoThunks (PayloadDependentState Tx mk) +deriving instance Show (LedgerTables (LedgerState TestBlock) mk) => Show (PayloadDependentState Tx mk) +deriving instance Serialise (LedgerTables (LedgerState TestBlock) mk) => Serialise (PayloadDependentState Tx mk) + +onValues :: + (Map Token TValue -> Map Token TValue) + -> LedgerTables (LedgerState TestBlock) ValuesMK + -> LedgerTables (LedgerState TestBlock) ValuesMK +onValues f (LedgerTables testUtxtokTable) = LedgerTables $ updateMap testUtxtokTable + where + updateMap :: ValuesMK Token TValue -> ValuesMK Token TValue + updateMap (ValuesMK utxovals) = + ValuesMK $ f utxovals + +queryKeys :: + (Map Token TValue -> a) + -> LedgerTables (LedgerState TestBlock) ValuesMK + -> a +queryKeys f (LedgerTables (ValuesMK utxovals)) = f utxovals + +{------------------------------------------------------------------------------- + Instances required for on-disk storage of ledger state tables +-------------------------------------------------------------------------------} + +type instance Ledger.Key (LedgerState TestBlock) = Token +type instance Ledger.Value (LedgerState TestBlock) = TValue + +instance HasLedgerTables (LedgerState TestBlock) where + projectLedgerTables st = utxtoktables $ payloadDependentState st + withLedgerTables st table = st { payloadDependentState = + (payloadDependentState st) {utxtoktables = table} + } + +instance HasLedgerTables (Ticked1 (LedgerState TestBlock)) where + projectLedgerTables (TickedTestLedger st) = + castLedgerTables $ projectLedgerTables st + withLedgerTables (TickedTestLedger st) tables = + TickedTestLedger $ withLedgerTables st $ castLedgerTables tables + +instance CanSerializeLedgerTables (LedgerState TestBlock) + +instance Serialise (LedgerTables (LedgerState TestBlock) EmptyMK) where + encode (LedgerTables (_ :: EmptyMK Token TValue)) + = CBOR.encodeNull + decode = LedgerTables EmptyMK <$ CBOR.decodeNull + +instance ToCBOR Token where + toCBOR (Token pt) = S.encode pt + +instance 
FromCBOR Token where + fromCBOR = fmap Token S.decode + +instance ToCBOR TValue where + toCBOR (TValue v) = S.encode v + +instance FromCBOR TValue where + fromCBOR = fmap TValue S.decode + +instance CanStowLedgerTables (LedgerState TestBlock) where + stowLedgerTables = stowErr "stowLedgerTables" + unstowLedgerTables = stowErr "unstowLedgerTables" + +stowErr :: String -> a +stowErr fname = error $ "Function " <> fname <> " should not be used in these tests." + +deriving anyclass instance ToExpr v => ToExpr (DS.Delta v) +deriving anyclass instance (ToExpr k, ToExpr v) => ToExpr (DS.Diff k v) +deriving anyclass instance (ToExpr k, ToExpr v) => ToExpr (DS.RootMeasure k v) +deriving anyclass instance (ToExpr k, ToExpr v) => ToExpr (DS.InternalMeasure k v) +deriving anyclass instance (ToExpr v) => ToExpr (StrictMaybe v) +deriving anyclass instance (ToExpr k, ToExpr v) => ToExpr (DS.Element k v) +deriving anyclass instance ToExpr DS.Length +deriving anyclass instance ToExpr DS.SlotNoUB +deriving anyclass instance ToExpr DS.SlotNoLB +deriving anyclass instance ToExpr (mk Token TValue) => ToExpr (LedgerTables (LedgerState TestBlock) mk) +deriving instance ToExpr (LedgerTables (LedgerState TestBlock) mk) => ToExpr (PayloadDependentState Tx mk) + +deriving newtype instance ToExpr (ValuesMK Token TValue) + +instance ToExpr v => ToExpr (DS.DeltaHistory v) where + toExpr h = App "DeltaHistory" [genericToExpr . toList . DS.getDeltaHistory $ h] + +instance ToExpr (ExtLedgerState TestBlock ValuesMK) where + toExpr = genericToExpr + +instance ToExpr (LedgerState (TestBlockWith Tx) ValuesMK) where + toExpr = genericToExpr + +instance HasHardForkHistory TestBlock where + type HardForkIndices TestBlock = '[TestBlock] + hardForkSummary = neverForksHardForkSummary tblcHardForkParams + +{------------------------------------------------------------------------------- + TestBlock generation + + When we added support for storing parts of the ledger state on disk we needed + to exercise this new functionality. Therefore, we modified this test so that + the ledger state associated to the test block contained tables (key-value + maps) to be stored on disk. This ledger state needs to follow an evolution + pattern similar to the UTxO one (see the 'PayloadSemantics' instance for more + details). As a result, block application might fail on a given payload. + + The tests in this module assume that no invalid blocks are generated. Thus we + have to satisfy this assumption in the block generators. To keep the + generators simple, eg independent on the ledger state, we follow this strategy + to block generation: + + - The block payload consist of a single transaction: + - input: Point + - output: (Point, SlotNo) + - The ledger state is a map from Point to SlotNo. + - We start always in an initial state in which 'GenesisPoint' maps to slot 0. + - When we generate a block for point p, the payload of the block will be: + - input: point p - 1 + - ouptput: (point p, slot of point p) + + + A consequence of adopting the strategy above is that the initial state is + coupled to the generator's semantics. 
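+
+  For example, a block generated for point p, whose predecessor sits at point
+  q, carries the payload below (a sketch matching 'genBlock' further down):
+
+    Tx { consumed = Token q, produced = (Token p, TValue (pointSlot p)) }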
+ -------------------------------------------------------------------------------} + +genesis :: ExtLedgerState TestBlock ValuesMK +genesis = testInitExtLedgerWithState initialTestLedgerState + +initialTestLedgerState :: PayloadDependentState Tx ValuesMK +initialTestLedgerState = UTxTok { + utxtoktables = LedgerTables + $ ValuesMK + $ Map.singleton initialToken (pointTValue initialToken) + , utxhist = Set.singleton initialToken + + } + where + initialToken = Token GenesisPoint + +-- | Get the token value associated to a given token. This is coupled to the +-- generators semantics. +pointTValue :: Token -> TValue +pointTValue = TValue . pointSlot . unToken + +genBlocks :: + Word64 + -> Point TestBlock + -> [TestBlock] +genBlocks n pt0 = take (fromIntegral n) (go pt0) + where + go pt = let b = genBlock pt in b : go (blockPoint b) + +genBlock :: + Point TestBlock -> TestBlock +genBlock pt = + mkBlockFrom pt Tx { consumed = Token pt + , produced = ( Token pt', TValue (pointSlot pt')) + } + where + mkBlockFrom :: Point (TestBlockWith ptype) -> ptype -> TestBlockWith ptype + mkBlockFrom GenesisPoint = firstBlockWithPayload 0 + mkBlockFrom (BlockPoint slot hash) = successorBlockWithPayload hash slot + + pt' :: Point (TestBlockWith Tx) + pt' = castPoint (blockPoint dummyBlk) + where + -- This could be the new block itself; we merely wanted to avoid the loop. + dummyBlk :: TestBlockWith () + dummyBlk = mkBlockFrom (castPoint pt) () + +extLedgerDbConfig :: SecurityParam -> LedgerDbCfg (ExtLedgerState TestBlock) +extLedgerDbConfig secParam = LedgerDbCfg { + ledgerDbCfgSecParam = secParam + , ledgerDbCfg = ExtLedgerCfg $ singleNodeTestConfigWith TestBlockCodecConfig TestBlockStorageConfig secParam (GenesisWindow (2 * maxRollbacks secParam)) + } + + +-- | TODO: for the time being 'TestBlock' does not have any codec config +data instance CodecConfig TestBlock = TestBlockCodecConfig + deriving (Show, Generic, NoThunks) + +-- | TODO: for the time being 'TestBlock' does not have any storage config +data instance StorageConfig TestBlock = TestBlockStorageConfig + deriving (Show, Generic, NoThunks) diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore.hs new file mode 100644 index 0000000000..7e720aebf0 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore.hs @@ -0,0 +1,345 @@ +{-# LANGUAGE DerivingVia #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE PackageImports #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeOperators #-} + +{-# OPTIONS_GHC -Wno-orphans #-} +{- HLINT ignore "Use camelCase" -} + +module Test.Ouroboros.Storage.LedgerDB.V1.BackingStore ( + labelledExamples + , tests + ) where + +import Cardano.Binary (FromCBOR (..), ToCBOR (..)) +import Cardano.Slotting.Slot +import Control.Concurrent.Class.MonadMVar.Strict +import Control.Concurrent.Class.MonadSTM.Strict.TMVar +import Control.Monad (void) +import Control.Monad.Class.MonadThrow (Handler (..), catches) +import Control.Monad.IO.Class (MonadIO (..)) +import Control.Monad.IOSim +import Control.Monad.Reader (runReaderT) +import qualified Data.Map.Strict as Map +import qualified 
Data.Set as Set +import qualified Data.SOP.Dict as Dict +import Data.Typeable +import Ouroboros.Consensus.Ledger.Tables +import qualified Ouroboros.Consensus.Ledger.Tables.Diff as Diff +import Ouroboros.Consensus.Ledger.Tables.Utils +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.Args as BS +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.InMemory as InMemory +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB as LMDB +import Ouroboros.Consensus.Util.Args +import Ouroboros.Consensus.Util.IOLike hiding (MonadMask (..), + newMVar, newTVarIO, readMVar) +import qualified System.Directory as Dir +import System.FS.API hiding (Handle) +import System.FS.IO (ioHasFS) +import qualified System.FS.Sim.MockFS as MockFS +import System.FS.Sim.STM +import System.IO.Temp (createTempDirectory) +import Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Lockstep +import qualified Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Mock as Mock +import Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Registry +import qualified Test.QuickCheck as QC +import Test.QuickCheck (Arbitrary (..), Property, Testable) +import "quickcheck-dynamic" Test.QuickCheck.Extras +import Test.QuickCheck.Gen.Unsafe +import qualified Test.QuickCheck.Monadic as QC +import Test.QuickCheck.Monadic (PropertyM) +import Test.QuickCheck.StateModel as StateModel +import Test.QuickCheck.StateModel.Lockstep as Lockstep +import Test.QuickCheck.StateModel.Lockstep.Run as Lockstep +import Test.Tasty +import Test.Tasty.QuickCheck (QuickCheckTests (..), testProperty) +import Test.Util.LedgerStateOnlyTables +import Test.Util.Orphans.Arbitrary () +import Test.Util.Orphans.IOLike () +import Test.Util.Orphans.ToExpr () + +{------------------------------------------------------------------------------- + Main test tree +-------------------------------------------------------------------------------} + +tests :: TestTree +tests = testGroup "BackingStore" [ + adjustOption (scaleQuickCheckTests 10) $ + testProperty "InMemory IOSim SimHasFS" testWithIOSim + , adjustOption (scaleQuickCheckTests 10) $ + testProperty "InMemory IO SimHasFS" $ testWithIO $ + setupBSEnv (const BS.InMemoryBackingStoreArgs) setupSimHasFS (pure ()) + , adjustOption (scaleQuickCheckTests 10) $ + testProperty "InMemory IO IOHasFS" $ testWithIO $ do + (fp, cleanup) <- setupTempDir + setupBSEnv (const BS.InMemoryBackingStoreArgs) (setupIOHasFS fp) cleanup + , adjustOption (scaleQuickCheckTests 2) $ + testProperty "LMDB IO IOHasFS" $ testWithIO $ do + (fp, cleanup) <- setupTempDir + setupBSEnv (\x -> BS.LMDBBackingStoreArgs (BS.LiveLMDBFS x) testLMDBLimits Dict.Dict) (setupIOHasFS fp) cleanup + ] + +scaleQuickCheckTests :: Int -> QuickCheckTests -> QuickCheckTests +scaleQuickCheckTests c (QuickCheckTests n) = QuickCheckTests $ c * n + +testLMDBLimits :: LMDB.LMDBLimits +testLMDBLimits = LMDB.LMDBLimits + { -- 100 MiB should be more than sufficient for the tests we're running here. + -- If the database were to grow beyond 100 Mebibytes, resulting in a test + -- error, then something in the LMDB backing store or tests has changed and + -- we should reconsider this value. + LMDB.lmdbMapSize = 100 * 1024 * 1024 + -- 3 internal databases: 1 for the settings, 1 for the state, and 1 for the + -- ledger tables. 
+ , LMDB.lmdbMaxDatabases = 3 + + , LMDB.lmdbMaxReaders = maxOpenValueHandles + } + +testWithIOSim :: Actions (Lockstep (BackingStoreState K V D)) -> Property +testWithIOSim acts = monadicSim $ do + BSEnv {bsRealEnv, bsCleanup} <- + QC.run (setupBSEnv (const BS.InMemoryBackingStoreArgs) setupSimHasFS (pure ())) + void $ + runPropertyIOLikeMonad $ + runPropertyReaderT (StateModel.runActions acts) bsRealEnv + QC.run bsCleanup + pure True + +testWithIO :: + IO (BSEnv IO K V D) + -> Actions (Lockstep T) -> Property +testWithIO mkBSEnv = runActionsBracket pT mkBSEnv bsCleanup runner + +runner :: + RealMonad m ks vs d a + -> BSEnv m ks vs d + -> m a +runner c r = unIOLikeMonad . runReaderT c $ bsRealEnv r + +-- | Generate minimal examples for each label. +labelledExamples :: IO () +labelledExamples = do + -- TODO: the thread delay ensures that we do not start printing labelled + -- examples throughout other test output, but it is not a very nice solution. + -- We should find a better alternative. + threadDelay 1 + QC.labelledExamples $ tagActions pT + +{------------------------------------------------------------------------------- + Resources +-------------------------------------------------------------------------------} + +data BSEnv m ks vs d = BSEnv { + bsRealEnv :: RealEnv m ks vs d + , bsCleanup :: m () + } + +-- | Set up a simulated @'HasFS'@. +setupSimHasFS :: IOLike m => m (SomeHasFS m) +setupSimHasFS = SomeHasFS . simHasFS <$> newTMVarIO MockFS.empty + +-- | Set up a @'HasFS'@ for @'IO'@. +setupIOHasFS :: (PrimState m ~ PrimState IO, MonadIO m) => FilePath -> m (SomeHasFS m) +setupIOHasFS = pure . SomeHasFS . ioHasFS . MountPoint + +-- | In case we are running tests in @'IO'@, we must do some temporary directory +-- management. +setupTempDir :: MonadIO m => m (FilePath, m ()) +setupTempDir = do + sysTmpDir <- liftIO Dir.getTemporaryDirectory + qsmTmpDir <- liftIO $ createTempDirectory sysTmpDir "BS_QSM" + pure (qsmTmpDir, liftIO $ Dir.removeDirectoryRecursive qsmTmpDir) + +setupBSEnv :: + IOLike m + => (SomeHasFS m -> Complete BS.BackingStoreArgs m) + -> m (SomeHasFS m) + -> m () + -> m (BSEnv m K V D) +setupBSEnv bss mkShfs cleanup = do + shfs@(SomeHasFS hfs) <- mkShfs + + createDirectory hfs (mkFsPath ["copies"]) + + let bsi = BS.newBackingStoreInitialiser mempty (bss shfs) (BS.SnapshotsFS shfs) + + bsVar <- newMVar =<< bsi (BS.InitFromValues Origin emptyLedgerTables) + + rr <- initHandleRegistry + + let + bsCleanup = do + bs <- readMVar bsVar + catches (BS.bsClose bs) closeHandlers + cleanup + + pure BSEnv { + bsRealEnv = RealEnv { + reBackingStoreInit = bsi + , reBackingStore = bsVar + , reRegistry = rr + } + , bsCleanup + } + +-- | A backing store will throw an error on close if it has already been closed, +-- which we ignore if we are performing a close as part of resource cleanup. 
+closeHandlers :: IOLike m => [Handler m ()] +closeHandlers = [ + Handler $ \case + InMemory.InMemoryBackingStoreClosedExn -> pure () + e -> throwIO e + , Handler $ \case + LMDB.LMDBErrClosed -> pure () + e -> throwIO e + ] + +{------------------------------------------------------------------------------- + Types under test +-------------------------------------------------------------------------------} + +type T = BackingStoreState K V D + +pT :: Proxy T +pT = Proxy + +type K = LedgerTables (OTLedgerState (Fixed Word) (Fixed Word)) KeysMK +type V = LedgerTables (OTLedgerState (Fixed Word) (Fixed Word)) ValuesMK +type D = LedgerTables (OTLedgerState (Fixed Word) (Fixed Word)) DiffMK + +{------------------------------------------------------------------------------- + @'HasOps'@ instances +-------------------------------------------------------------------------------} + +instance Mock.EmptyValues V where + emptyValues = emptyLedgerTables + +instance Mock.ApplyDiff V D where + applyDiff = applyDiffs' + +instance Mock.LookupKeysRange K V where + lookupKeysRange = \prev n vs -> + case prev of + Nothing -> + ltmap (rangeRead n) vs + Just ks -> + ltliftA2 (rangeRead' n) ks vs + where + rangeRead :: Int -> ValuesMK k v -> ValuesMK k v + rangeRead n (ValuesMK vs) = + ValuesMK $ Map.take n vs + + rangeRead' :: + Ord k + => Int + -> KeysMK k v + -> ValuesMK k v + -> ValuesMK k v + rangeRead' n ksmk vsmk = + case Set.lookupMax ks of + Nothing -> ValuesMK Map.empty + Just k -> ValuesMK $ + Map.take n $ snd $ Map.split k vs + where + KeysMK ks = ksmk + ValuesMK vs = vsmk + +instance Mock.LookupKeys K V where + lookupKeys = ltliftA2 readKeys + where + readKeys :: + Ord k + => KeysMK k v + -> ValuesMK k v + -> ValuesMK k v + readKeys (KeysMK ks) (ValuesMK vs) = + ValuesMK $ Map.restrictKeys vs ks + +instance Mock.ValuesLength V where + valuesLength (LedgerTables (ValuesMK m)) = + Map.size m + +instance Mock.MakeDiff V D where + diff t1 t2 = forgetTrackingValues $ calculateDifference t1 t2 + +instance Mock.DiffSize D where + diffSize (LedgerTables (DiffMK (Diff.Diff m))) = Map.size m + +instance Mock.KeysSize K where + keysSize (LedgerTables (KeysMK s)) = Set.size s + +instance Mock.HasOps K V D + +{------------------------------------------------------------------------------- + Utilities +-------------------------------------------------------------------------------} + +runPropertyIOLikeMonad :: + IOLikeMonadC m + => PropertyM (IOLikeMonad m) a + -> PropertyM m a +runPropertyIOLikeMonad p = QC.MkPropertyM $ \k -> do + m <- QC.unPropertyM p $ fmap ioLikeMonad . k + return $ unIOLikeMonad m + +-- | Copied from @Ouroboros.Network.Testing.QuickCheck@. +runSimGen :: (forall s. QC.Gen (IOSim s a)) -> QC.Gen a +runSimGen f = do + Capture eval <- capture + return $ runSimOrThrow (eval f) + +-- | Copied from @Ouroboros.Network.Testing.QuickCheck@. +monadicSim :: Testable a => (forall s. 
PropertyM (IOSim s) a) -> Property +monadicSim m = QC.property (runSimGen (QC.monadic' m)) + +{------------------------------------------------------------------------------- + Orphan Arbitrary instances +-------------------------------------------------------------------------------} + +deriving newtype instance QC.Arbitrary (mk k v) + => QC.Arbitrary (OTLedgerTables k v mk) + +instance (Ord k, QC.Arbitrary k) + => QC.Arbitrary (KeysMK k v) where + arbitrary = KeysMK <$> QC.arbitrary + shrink (KeysMK ks) = KeysMK <$> QC.shrink ks + +instance (Ord k, QC.Arbitrary k, QC.Arbitrary v) + => QC.Arbitrary (DiffMK k v) where + arbitrary = DiffMK <$> QC.arbitrary + shrink (DiffMK d) = DiffMK <$> QC.shrink d + +instance (Ord k, QC.Arbitrary k, QC.Arbitrary v) + => QC.Arbitrary (ValuesMK k v) where + arbitrary = ValuesMK <$> QC.arbitrary + shrink (ValuesMK vs) = ValuesMK <$> QC.shrink vs + +deriving newtype instance (Ord k, QC.Arbitrary k, QC.Arbitrary v) + => QC.Arbitrary (Diff.Diff k v) +instance QC.Arbitrary v => QC.Arbitrary (Diff.Delta v) where + arbitrary = + QC.oneof [ + Diff.Insert <$> QC.arbitrary + , pure Diff.Delete + ] + +instance QC.Arbitrary ks => QC.Arbitrary (BS.RangeQuery ks) where + arbitrary = BS.RangeQuery <$> QC.arbitrary <*> QC.arbitrary + shrink (BS.RangeQuery x y) = BS.RangeQuery <$> QC.shrink x <*> QC.shrink y + +newtype Fixed a = Fixed a + deriving newtype (Show, Eq, Ord) + deriving newtype (NoThunks, ToCBOR, FromCBOR) + +deriving via QC.Fixed a instance QC.Arbitrary a => QC.Arbitrary (Fixed a) diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Lockstep.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Lockstep.hs new file mode 100644 index 0000000000..cc9d433a80 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Lockstep.hs @@ -0,0 +1,811 @@ +{-# LANGUAGE ConstraintKinds #-} +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE GADTs #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE InstanceSigs #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeApplications #-} +{-# LANGUAGE TypeFamilies #-} + +module Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Lockstep ( + -- * Facilitate running the tests in @'IO'@ or @'IOSim'@. + IOLikeMonad (..) + , IOLikeMonadC (..) + , RealMonad + , unIOLikeMonad + -- * Model state + , BackingStoreState (..) + , RealEnv (..) 
+ , maxOpenValueHandles + ) where + +import Cardano.Slotting.Slot +import Control.Concurrent.Class.MonadMVar.Strict +import Control.Monad +import Control.Monad.Class.MonadThrow +import Control.Monad.IOSim +import Control.Monad.Reader +import Data.Bifunctor +import Data.Constraint +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Typeable +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.InMemory as BS +import Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore.Impl.LMDB as LMDB + (LMDBErr (..)) +import Ouroboros.Consensus.Util.IOLike hiding (MonadMask (..), + StrictMVar, handle, readMVar, swapMVar) +import System.FS.API hiding (Handle) +import qualified System.FS.API.Types as FS +import Test.Cardano.Ledger.Binary.Arbitrary () +import qualified Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Mock as Mock +import Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Mock (Err (..), + Mock (..), ValueHandle (..), runMockState) +import Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Registry +import qualified Test.QuickCheck as QC +import Test.QuickCheck (Gen) +import Test.QuickCheck.StateModel +import Test.QuickCheck.StateModel.Lockstep as Lockstep +import Test.QuickCheck.StateModel.Lockstep.Defaults as Lockstep +import Test.QuickCheck.StateModel.Lockstep.Op as Lockstep +import Test.QuickCheck.StateModel.Lockstep.Op.SumProd as Lockstep +import Test.Util.Orphans.Arbitrary () +import Test.Util.Orphans.ToExpr () + +{------------------------------------------------------------------------------- + Facilitate running the tests in @'IO'@ or @'IOSim'@. +-------------------------------------------------------------------------------} + +-- This wrapper allows us to run the tests both in @'IO'@ and @'IOSim'@, without +-- having to duplicate code for both @'IO'@ and @'IOSim'@. +data IOLikeMonad m a where + RealIO :: IO a -> IOLikeMonad IO a + SimIO :: IOSim s a -> IOLikeMonad (IOSim s) a + +-- | Retrieve the wrapped @'IOLike'@ monad. +unIOLikeMonad :: IOLikeMonad m a -> m a +unIOLikeMonad (RealIO x) = x +unIOLikeMonad (SimIO x) = x + +-- | Create a wrapper @'IOLike'@ monad. +class IOLikeMonadC m where + ioLikeMonad :: m a -> IOLikeMonad m a + +instance IOLikeMonadC IO where + ioLikeMonad x = RealIO x + +instance IOLikeMonadC (IOSim s) where + ioLikeMonad x = SimIO x + +instance (Functor m, IOLikeMonadC m) => Functor (IOLikeMonad m) where + fmap f x = ioLikeMonad $ fmap f (unIOLikeMonad x) + +instance (Applicative m, IOLikeMonadC m) => Applicative (IOLikeMonad m) where + x <*> y = ioLikeMonad $ unIOLikeMonad x <*> unIOLikeMonad y + pure = ioLikeMonad . pure + +instance (Monad m, IOLikeMonadC m) => Monad (IOLikeMonad m) where + m >>= fm = ioLikeMonad $ unIOLikeMonad m >>= unIOLikeMonad . fm + +-- | Since the tests do not return any types specific to the underlying +-- @'IOLike'@ monad, @'Realized' ('IOLikeMonad' m)@ behaves just like +-- @'Realized' 'IO'@. +type instance Realized (IOLikeMonad m) a = a + +{------------------------------------------------------------------------------- + @'Values'@ wrapper +-------------------------------------------------------------------------------} + +-- | Wrapper for preventing nonsensical pattern matches. +-- +-- A logical step is to have the @'BSVHRangeRead'@ and @'BSVHRead'@ actions +-- declare that the result of the action should be something of type @'vs'@. 
+-- However, this means that in theory @'vs'@ could be instantiated to any type +-- (like @'Handle'@). Consequently, if we match on a value that is returned +-- by running an action, we would always have to match on the case where it is a +-- result of running @'BSVHRangeRead'@ and @'BSVHRead'@ as well, even if the +-- return type is @'Handle'@, which we don't expect to use as our @vs@ type. As +-- such, we define this wrapper to prevent having to match on this nonsensical +-- case. +newtype Values vs = Values {unValues :: vs} + deriving stock (Show, Eq, Ord, Typeable) + deriving newtype QC.Arbitrary + +{------------------------------------------------------------------------------- + Model state +-------------------------------------------------------------------------------} + +data BackingStoreState ks vs d = BackingStoreState { + bssMock :: Mock vs + , bssStats :: Stats ks vs d + } + deriving (Show, Eq) + +initState :: Mock.EmptyValues vs => BackingStoreState ks vs d +initState = BackingStoreState { + bssMock = Mock.emptyMock + , bssStats = initStats + } + +-- | Maximum number of LMDB readers that can be active at a time. +-- +-- 32 is an arbitrary number of readers. We can increase or decrease this at +-- will. +maxOpenValueHandles :: Int +maxOpenValueHandles = 32 + +{------------------------------------------------------------------------------- + @'StateModel'@ and @'RunModel'@ instances +-------------------------------------------------------------------------------} + +type BackingStoreInitializer m ks vs d = + BS.InitFrom vs + -> m (BS.BackingStore m ks vs d) + +data RealEnv m ks vs d = RealEnv { + reBackingStoreInit :: BackingStoreInitializer m ks vs d + , reBackingStore :: StrictMVar m (BS.BackingStore m ks vs d) + , reRegistry :: HandleRegistry m (BS.BackingStoreValueHandle m ks vs) + } + +type RealMonad m ks vs d = ReaderT (RealEnv m ks vs d) (IOLikeMonad m) + +type BSAct ks vs d a = + Action + (Lockstep (BackingStoreState ks vs d)) + (Either Err a) +type BSVar ks vs d a = + ModelVar (BackingStoreState ks vs d) a + +instance ( Show ks, Show vs, Show d + , Eq ks, Eq vs, Eq d + , Typeable ks, Typeable vs, Typeable d + , QC.Arbitrary ks, QC.Arbitrary vs, QC.Arbitrary d + , QC.Arbitrary (BS.RangeQuery ks) + , Mock.HasOps ks vs d + ) => StateModel (Lockstep (BackingStoreState ks vs d)) where + data Action (Lockstep (BackingStoreState ks vs d)) a where + -- Reopen a backing store by initialising from values. + BSInitFromValues :: WithOrigin SlotNo + -> Values vs + -> BSAct ks vs d () + -- Reopen a backing store by initialising from a copy. 
+ BSInitFromCopy :: FS.FsPath + -> BSAct ks vs d () + BSClose :: BSAct ks vs d () + BSCopy :: FS.FsPath + -> BSAct ks vs d () + BSValueHandle :: BSAct ks vs d Handle + BSWrite :: SlotNo + -> d + -> BSAct ks vs d () + BSVHClose :: BSVar ks vs d Handle + -> BSAct ks vs d () + BSVHRangeRead :: BSVar ks vs d Handle + -> BS.RangeQuery ks + -> BSAct ks vs d (Values vs) + BSVHRead :: BSVar ks vs d Handle + -> ks + -> BSAct ks vs d (Values vs) + BSVHAtSlot :: BSVar ks vs d Handle + -> BSAct ks vs d (WithOrigin SlotNo) + -- | Corresponds to 'bsvhStat' + BSVHStat :: BSVar ks vs d Handle + -> BSAct ks vs d BS.Statistics + + initialState = Lockstep.initialState initState + nextState = Lockstep.nextState + precondition st act = Lockstep.precondition st act + && modelPrecondition (getModel st) act + arbitraryAction = Lockstep.arbitraryAction + shrinkAction = Lockstep.shrinkAction + +deriving stock instance (Show ks, Show vs, Show d) + => Show (LockstepAction (BackingStoreState ks vs d) a) +deriving stock instance (Eq ks, Eq vs, Eq d) + => Eq (LockstepAction (BackingStoreState ks vs d) a) + +instance ( Show ks, Show vs, Show d + , Eq ks, Eq vs, Eq d + , Typeable ks, Typeable vs, Typeable d + , QC.Arbitrary ks, QC.Arbitrary vs, QC.Arbitrary d + , QC.Arbitrary (BS.RangeQuery ks) + , IOLike m + , Mock.HasOps ks vs d + , IOLikeMonadC m + ) => RunModel + (Lockstep (BackingStoreState ks vs d)) + (RealMonad m ks vs d) where + perform = \_st -> runIO + postcondition = Lockstep.postcondition + monitoring = Lockstep.monitoring (Proxy @(RealMonad m ks vs d)) + +-- | Custom precondition that prevents errors in the @'LMDB'@ backing store due +-- to exceeding the maximum number of LMDB readers. +-- +-- See @'maxOpenValueHandles'@. +modelPrecondition :: + BackingStoreState ks vs d + -> LockstepAction (BackingStoreState ks vs d) a + -> Bool +modelPrecondition (BackingStoreState mock _stats) action = case action of + BSInitFromValues _ _ -> isClosed mock + BSInitFromCopy _ -> isClosed mock + BSCopy _ -> canOpenReader + BSValueHandle -> canOpenReader + _ -> True + where + canOpenReader = Map.size openValueHandles < maxOpenValueHandles + openValueHandles = Map.filter (==Mock.Open) (valueHandles mock) + +{------------------------------------------------------------------------------- + @'InLockstep'@ instance +-------------------------------------------------------------------------------} + +type BSVal ks vs d a = ModelValue (BackingStoreState ks vs d) a +type BSObs ks vs d a = Observable (BackingStoreState ks vs d) a + +instance ( Show ks, Show vs, Show d + , Eq ks, Eq vs, Eq d + , Typeable ks, Typeable vs, Typeable d + , QC.Arbitrary ks, QC.Arbitrary vs, QC.Arbitrary d + , QC.Arbitrary (BS.RangeQuery ks) + , Mock.HasOps ks vs d + ) => InLockstep (BackingStoreState ks vs d) where + + data instance ModelValue (BackingStoreState ks vs d) a where + MValueHandle :: ValueHandle vs -> BSVal ks vs d Handle + + MErr :: Err + -> BSVal ks vs d Err + MSlotNo :: WithOrigin SlotNo + -> BSVal ks vs d (WithOrigin SlotNo) + MValues :: vs + -> BSVal ks vs d (Values vs) + MUnit :: () + -> BSVal ks vs d () + MStatistics :: BS.Statistics + -> BSVal ks vs d BS.Statistics + + MEither :: Either (BSVal ks vs d a) (BSVal ks vs d b) + -> BSVal ks vs d (Either a b) + MPair :: (BSVal ks vs d a, BSVal ks vs d b) + -> BSVal ks vs d (a, b) + + data instance Observable (BackingStoreState ks vs d) a where + OValueHandle :: BSObs ks vs d Handle + OValues :: (Show a, Eq a, Typeable a) => a -> BSObs ks vs d (Values a) + OId :: (Show a, Eq a, Typeable a) => 
a -> BSObs ks vs d a + OEither :: Either (BSObs ks vs d a) (BSObs ks vs d b) + -> BSObs ks vs d (Either a b) + OPair :: (BSObs ks vs d a, BSObs ks vs d b) -> BSObs ks vs d (a, b) + + observeModel :: BSVal ks vs d a -> BSObs ks vs d a + observeModel = \case + MValueHandle _ -> OValueHandle + MErr x -> OId x + MSlotNo x -> OId x + MValues x -> OValues x + MUnit x -> OId x + MStatistics x -> OId x + MEither x -> OEither $ bimap observeModel observeModel x + MPair x -> OPair $ bimap observeModel observeModel x + + modelNextState :: forall a. + LockstepAction (BackingStoreState ks vs d) a + -> ModelLookUp (BackingStoreState ks vs d) + -> BackingStoreState ks vs d -> (BSVal ks vs d a, BackingStoreState ks vs d) + modelNextState action lookUp (BackingStoreState mock stats) = + auxStats $ runMock lookUp action mock + where + auxStats :: + (BSVal ks vs d a, Mock vs) + -> (BSVal ks vs d a, BackingStoreState ks vs d) + auxStats (result, state') = + ( result + , BackingStoreState state' $ updateStats action lookUp result stats + ) + + type ModelOp (BackingStoreState ks vs d) = Op + + usedVars :: + LockstepAction (BackingStoreState ks vs d) a + -> [AnyGVar (ModelOp (BackingStoreState ks vs d))] + usedVars = \case + BSInitFromValues _ _ -> [] + BSInitFromCopy _ -> [] + BSClose -> [] + BSCopy _ -> [] + BSValueHandle -> [] + BSWrite _ _ -> [] + BSVHClose h -> [SomeGVar h] + BSVHRangeRead h _ -> [SomeGVar h] + BSVHRead h _ -> [SomeGVar h] + BSVHAtSlot h -> [SomeGVar h] + BSVHStat h -> [SomeGVar h] + + arbitraryWithVars :: + ModelFindVariables (BackingStoreState ks vs d) + -> BackingStoreState ks vs d + -> Gen (Any (LockstepAction (BackingStoreState ks vs d))) + arbitraryWithVars = arbitraryBackingStoreAction + + shrinkWithVars :: + ModelFindVariables (BackingStoreState ks vs d) + -> BackingStoreState ks vs d + -> LockstepAction (BackingStoreState ks vs d) a + -> [Any (LockstepAction (BackingStoreState ks vs d))] + shrinkWithVars = shrinkBackingStoreAction + + tagStep :: + (BackingStoreState ks vs d, BackingStoreState ks vs d) + -> LockstepAction (BackingStoreState ks vs d) a + -> BSVal ks vs d a + -> [String] + tagStep (_before, BackingStoreState _ after) action val = + map show $ tagBSAction after action val + +deriving stock instance (Show ks, Show vs, Show d) => Show (BSVal ks vs d a) + +deriving stock instance (Show ks, Show vs, Show d) => Show (BSObs ks vs d a) +deriving stock instance (Eq ks, Eq vs, Eq d) => Eq (BSObs ks vs d a) + +{------------------------------------------------------------------------------- + @'RunLockstep'@ instance +-------------------------------------------------------------------------------} + +instance ( Show ks, Show vs, Show d + , Eq ks, Eq vs, Eq d + , Typeable ks, Typeable vs, Typeable d + , QC.Arbitrary ks, QC.Arbitrary vs, QC.Arbitrary d + , QC.Arbitrary (BS.RangeQuery ks) + , IOLike m + , Mock.HasOps ks vs d + , IOLikeMonadC m + ) => RunLockstep (BackingStoreState ks vs d) (RealMonad m ks vs d) where + observeReal :: + Proxy (RealMonad m ks vs d) + -> LockstepAction (BackingStoreState ks vs d) a + -> Realized (RealMonad m ks vs d) a + -> BSObs ks vs d a + observeReal _proxy = \case + BSInitFromValues _ _ -> OEither . bimap OId OId + BSInitFromCopy _ -> OEither . bimap OId OId + BSClose -> OEither . bimap OId OId + BSCopy _ -> OEither . bimap OId OId + BSValueHandle -> OEither . bimap OId (const OValueHandle) + BSWrite _ _ -> OEither . bimap OId OId + BSVHClose _ -> OEither . bimap OId OId + BSVHRangeRead _ _ -> OEither . bimap OId (OValues . 
unValues) + BSVHRead _ _ -> OEither . bimap OId (OValues . unValues) + BSVHAtSlot _ -> OEither . bimap OId OId + BSVHStat _ -> OEither . bimap OId OId + + showRealResponse :: + Proxy (RealMonad m ks vs d) + -> LockstepAction (BackingStoreState ks vs d) a + -> Maybe (Dict (Show (Realized (RealMonad m ks vs d) a))) + showRealResponse _proxy = \case + BSInitFromValues _ _ -> Just Dict + BSInitFromCopy _ -> Just Dict + BSClose -> Just Dict + BSCopy _ -> Just Dict + BSValueHandle -> Just Dict + BSWrite _ _ -> Just Dict + BSVHClose _ -> Just Dict + BSVHRangeRead _ _ -> Just Dict + BSVHRead _ _ -> Just Dict + BSVHAtSlot _ -> Just Dict + BSVHStat _ -> Just Dict + +{------------------------------------------------------------------------------- + Interpreter against the model +-------------------------------------------------------------------------------} + +runMock :: + Mock.HasOps ks vs d + => ModelLookUp (BackingStoreState ks vs d) + -> Action (Lockstep (BackingStoreState ks vs d)) a + -> Mock vs + -> ( BSVal ks vs d a + , Mock vs + ) +runMock lookUp = \case + BSInitFromValues sl (Values vs) -> + wrap MUnit . runMockState (Mock.mBSInitFromValues sl vs) + BSInitFromCopy bsp -> + wrap MUnit . runMockState (Mock.mBSInitFromCopy bsp) + BSClose -> + wrap MUnit . runMockState Mock.mBSClose + BSCopy bsp -> + wrap MUnit . runMockState (Mock.mBSCopy bsp) + BSValueHandle -> + wrap MValueHandle . runMockState Mock.mBSValueHandle + BSWrite sl d -> + wrap MUnit . runMockState (Mock.mBSWrite sl d) + BSVHClose h -> + wrap MUnit . runMockState (Mock.mBSVHClose (getHandle $ lookUp h)) + BSVHRangeRead h rq -> + wrap MValues . runMockState (Mock.mBSVHRangeRead (getHandle $ lookUp h) rq) + BSVHRead h ks -> + wrap MValues . runMockState (Mock.mBSVHRead (getHandle $ lookUp h) ks) + BSVHAtSlot h -> + wrap MSlotNo . runMockState (Mock.mBSVHAtSlot (getHandle $ lookUp h)) + BSVHStat h -> + wrap MStatistics . runMockState (Mock.mBSVHStat (getHandle $ lookUp h)) + where + wrap :: + (a -> BSVal ks vs d b) + -> (Either Err a, Mock vs) + -> (BSVal ks vs d (Either Err b), Mock vs) + wrap f = first (MEither . bimap MErr f) + + getHandle :: BSVal ks vs d Handle -> ValueHandle vs + getHandle (MValueHandle h) = h + +{------------------------------------------------------------------------------- + Generator +-------------------------------------------------------------------------------} + +arbitraryBackingStoreAction :: + forall ks vs d. 
+ ( Eq ks, Eq vs, Eq d, Typeable vs + , QC.Arbitrary ks, QC.Arbitrary vs + , QC.Arbitrary (BS.RangeQuery ks) + , Mock.MakeDiff vs d + ) + => ModelFindVariables (BackingStoreState ks vs d) + -> BackingStoreState ks vs d + -> Gen (Any (LockstepAction (BackingStoreState ks vs d))) +arbitraryBackingStoreAction findVars (BackingStoreState mock _stats) = + QC.frequency $ + withoutVars + ++ case findVars (Proxy @(Either Err Handle)) of + [] -> [] + vars -> withVars (QC.elements vars) + where + withoutVars :: [(Int, Gen (Any (LockstepAction (BackingStoreState ks vs d))))] + withoutVars = [ + (5, fmap Some $ BSInitFromValues <$> QC.arbitrary <*> (Values <$> QC.arbitrary)) + , (5, fmap Some $ BSInitFromCopy <$> genBackingStorePath) + , (2, pure $ Some BSClose) + , (5, fmap Some $ BSCopy <$> genBackingStorePath) + , (5, pure $ Some BSValueHandle) + , (5, fmap Some $ BSWrite <$> genSlotNo <*> genDiff) + ] + + withVars :: + Gen (BSVar ks vs d (Either Err Handle)) + -> [(Int, Gen (Any (LockstepAction (BackingStoreState ks vs d))))] + withVars genVar = [ + (5, fmap Some $ BSVHClose <$> (fhandle <$> genVar)) + , (5, fmap Some $ BSVHRangeRead <$> (fhandle <$> genVar) <*> QC.arbitrary) + , (5, fmap Some $ BSVHRead <$> (fhandle <$> genVar) <*> QC.arbitrary) + , (5, fmap Some $ BSVHAtSlot <$> (fhandle <$> genVar)) + , (5, fmap Some $ BSVHStat <$> (fhandle <$> genVar)) + ] + where + fhandle :: + GVar Op (Either Err Handle) + -> GVar Op Handle + fhandle = mapGVar (\op -> OpRight `OpComp` op) + + genBackingStorePath :: Gen FS.FsPath + genBackingStorePath = do + file <- genBSPFile + pure . mkFsPath $ ["copies", file] + + -- Generate a file name for a copy of the backing store contents. We keep + -- the set of possible file names small, such that errors (i.e., file already + -- exists) occur most of the time. + genBSPFile :: Gen String + genBSPFile = QC.elements [show x | x <- [1 :: Int .. 10]] + + -- Generate a slot number that is close before, at, or after the backing + -- store's current slot number. A + genSlotNo :: Gen SlotNo + genSlotNo = do + n :: Int <- QC.choose (-5, 5) + pure $ maybe 0 (+ fromIntegral n) (withOriginToMaybe seqNo) + where + seqNo = backingSeqNo mock + + -- Generate valid diffs most of the time, and generate fully arbitrary + -- (probably invalid) diffs some of the time. + genDiff :: Gen d + genDiff = QC.frequency [ + (9, Mock.diff (backingValues mock) <$> QC.arbitrary) + --TODO: enable @, (1, QC.arbitrary)@ + ] + +{------------------------------------------------------------------------------- + Shrinker +-------------------------------------------------------------------------------} + +shrinkBackingStoreAction :: + forall ks vs d a. 
+ ( Typeable vs, Eq ks, Eq vs, Eq d + , QC.Arbitrary d, QC.Arbitrary (BS.RangeQuery ks), QC.Arbitrary ks + ) + => ModelFindVariables (BackingStoreState ks vs d) + -> BackingStoreState ks vs d + -> LockstepAction (BackingStoreState ks vs d) a + -> [Any (LockstepAction (BackingStoreState ks vs d))] +shrinkBackingStoreAction _findVars (BackingStoreState _mock _) = \case + BSWrite sl d -> + [Some $ BSWrite sl d' | d' <- QC.shrink d] + ++ [Some $ BSWrite sl' d | sl' <- QC.shrink sl] + BSVHRangeRead h rq -> + [Some $ BSVHRangeRead h rq' | rq' <- QC.shrink rq] + BSVHRead h ks -> + [Some $ BSVHRead h ks' | ks' <- QC.shrink ks] + _ -> [] + +{------------------------------------------------------------------------------- + Interpret @'Op'@ against @'ModelValue'@ +-------------------------------------------------------------------------------} + +instance InterpretOp Op (ModelValue (BackingStoreState ks vs d)) where + intOp OpId = Just + intOp OpFst = \case MPair x -> Just (fst x) + intOp OpSnd = \case MPair x -> Just (snd x) + intOp OpLeft = \case MEither x -> either Just (const Nothing) x + intOp OpRight = \case MEither x -> either (const Nothing) Just x + intOp (OpComp g f) = intOp g <=< intOp f + +{------------------------------------------------------------------------------- + Interpreter for implementation (@'RealMonad'@) +-------------------------------------------------------------------------------} + +runIO :: + forall m ks vs d a. (IOLike m, IOLikeMonadC m) => + LockstepAction (BackingStoreState ks vs d) a + -> LookUp (RealMonad m ks vs d) + -> RealMonad m ks vs d (Realized (RealMonad m ks vs d) a) +runIO action lookUp = ReaderT $ \renv -> + ioLikeMonad $ aux renv action + where + aux :: + RealEnv m ks vs d + -> LockstepAction (BackingStoreState ks vs d) a + -> m a + aux renv = \case + BSInitFromValues sl (Values vs) -> catchErr $ do + bs <- bsi (BS.InitFromValues sl vs) + void $ swapMVar bsVar bs + BSInitFromCopy bsp -> catchErr $ do + bs <- bsi (BS.InitFromCopy bsp) + void $ swapMVar bsVar bs + BSClose -> catchErr $ + readMVar bsVar >>= BS.bsClose + BSCopy bsp -> catchErr $ + readMVar bsVar >>= \bs -> BS.bsCopy bs bsp + BSValueHandle -> catchErr $ + readMVar bsVar >>= (BS.bsValueHandle >=> registerHandle rr) + BSWrite sl d -> catchErr $ + readMVar bsVar >>= \bs -> BS.bsWrite bs sl d + BSVHClose h -> catchErr $ + readHandle rr (lookUp' h) >>= \vh -> BS.bsvhClose vh + BSVHRangeRead h rq -> catchErr $ Values <$> + (readHandle rr (lookUp' h) >>= \vh -> BS.bsvhRangeRead vh rq) + BSVHRead h ks -> catchErr $ Values <$> + (readHandle rr (lookUp' h) >>= \vh -> BS.bsvhRead vh ks) + BSVHAtSlot h -> catchErr $ + readHandle rr (lookUp' h) >>= pure . BS.bsvhAtSlot + BSVHStat h -> catchErr $ + readHandle rr (lookUp' h) >>= \vh -> BS.bsvhStat vh + where + RealEnv{ + reBackingStoreInit = bsi + , reBackingStore = bsVar + , reRegistry = rr + } = renv + + lookUp' :: BSVar ks vs d x -> Realized (RealMonad m ks vs d) x + lookUp' = lookUpGVar (Proxy @(RealMonad m ks vs d)) lookUp + +instance InterpretOp Op (WrapRealized (IOLikeMonad m)) where + intOp = intOpRealizedId intOpId + +catchErr :: forall m a. 
IOLike m => m a -> m (Either Err a) +catchErr act = catches (Right <$> act) + [mkHandler fromTVarExn, mkHandler fromTVarExn', mkHandler fromDbErr] + +{------------------------------------------------------------------------------- + Statistics and tagging +-------------------------------------------------------------------------------} + +data Stats ks vs d = Stats { + -- | Slots that value handles were created in + handleSlots :: Map (ValueHandle vs) (WithOrigin SlotNo) + -- | Slots in which writes were performed + , writeSlots :: Map SlotNo Int + -- | A value handle was created before a write, and read after the write + , readAfterWrite :: Bool + -- | A value handle was created before a write, and range read after the + -- write + , rangeReadAfterWrite :: Bool + } + deriving stock (Show, Eq) + + +initStats :: Stats ks vs d +initStats = Stats { + handleSlots = Map.empty + , writeSlots = Map.empty + , readAfterWrite = False + , rangeReadAfterWrite = False + } + +updateStats :: + forall ks vs d a. Mock.HasOps ks vs d + => LockstepAction (BackingStoreState ks vs d) a + -> ModelLookUp (BackingStoreState ks vs d) + -> BSVal ks vs d a + -> Stats ks vs d + -> Stats ks vs d +updateStats action lookUp result stats@Stats{handleSlots, writeSlots} = + updateHandleSlots + . updateWriteSlots + . updateReadAfterWrite + . updateRangeReadAfterWrite + $ stats + where + getHandle :: BSVal ks vs d Handle -> ValueHandle vs + getHandle (MValueHandle h) = h + + updateHandleSlots :: Stats ks vs d -> Stats ks vs d + updateHandleSlots s = case (action, result) of + (BSValueHandle, MEither (Right (MValueHandle h))) + -> s {handleSlots = Map.insert h (seqNo h) handleSlots} + (BSClose, MEither (Right _)) + -> s {handleSlots = Map.empty} + (BSVHClose h, MEither (Right _)) + -> s {handleSlots = Map.delete (getHandle $ lookUp h) handleSlots} + _ -> s + + updateWriteSlots :: Stats ks vs d -> Stats ks vs d + updateWriteSlots s = case (action, result) of + (BSWrite sl d, MEither (Right (MUnit ()))) + | 1 <= Mock.diffSize d + -> s {writeSlots = Map.insert sl (Mock.diffSize d) writeSlots} + (BSClose, MEither (Right _)) + -> s {writeSlots = Map.empty} + _ -> s + + updateReadAfterWrite :: Stats ks vs d -> Stats ks vs d + updateReadAfterWrite s = case (action, result) of + (BSVHRead h _, MEither (Right (MValues vs))) + | h' <- getHandle $ lookUp h + , Just wosl <- Map.lookup h' handleSlots + , Just (sl, _) <- Map.lookupMax writeSlots + , wosl < at sl + , 1 <= Mock.valuesLength vs + -> s {readAfterWrite = True} + _ -> s + + updateRangeReadAfterWrite :: Stats ks vs d -> Stats ks vs d + updateRangeReadAfterWrite s = case (action, result) of + (BSVHRangeRead h _, MEither (Right (MValues vs))) + | h' <- getHandle $ lookUp h + , Just wosl <- Map.lookup h' handleSlots + , Just (sl, _) <- Map.lookupMax writeSlots + , wosl < at sl + , 1 <= Mock.valuesLength vs + -> s {rangeReadAfterWrite = True} + _ -> s + +data TagAction = + TBSInitFromValues + | TBSInitFromCopy + | TBSClose + | TBSCopy + | TBSValueHandle + | TBSWrite + | TBSVHClose + | TBSVHRangeRead + | TBSVHRead + | TBSVHAtSlot + | TBSVHStat + deriving (Show, Eq, Ord, Bounded, Enum) + +-- | Identify actions by their constructor. 
+tAction :: LockstepAction (BackingStoreState ks vs d) a -> TagAction +tAction = \case + BSInitFromValues _ _ -> TBSInitFromValues + BSInitFromCopy _ -> TBSInitFromCopy + BSClose -> TBSClose + BSCopy _ -> TBSCopy + BSValueHandle -> TBSValueHandle + BSWrite _ _ -> TBSWrite + BSVHClose _ -> TBSVHClose + BSVHRangeRead _ _ -> TBSVHRangeRead + BSVHRead _ _ -> TBSVHRead + BSVHAtSlot _ -> TBSVHAtSlot + BSVHStat _ -> TBSVHStat + +data Tag = + -- | A value handle is created before a write, and read after the write. The + -- write should not affect the result of the read. + ReadAfterWrite + -- | A value handle is created before a write, and read after the write. The + -- write should not affect the result of the read. + | RangeReadAfterWrite + | ErrorBecauseBackingStoreIsClosed TagAction + | ErrorBecauseBackingStoreValueHandleIsClosed TagAction + deriving (Show) + +tagBSAction :: + Stats ks vs d + -> LockstepAction (BackingStoreState ks vs d) a + -> BSVal ks vs d a + -> [Tag] +tagBSAction stats action result = + globalTags ++ case (action, result) of + (_, MEither (Left (MErr ErrBackingStoreClosed))) -> + [ErrorBecauseBackingStoreIsClosed (tAction action)] + (_, MEither (Left (MErr ErrBackingStoreValueHandleClosed))) -> + [ErrorBecauseBackingStoreValueHandleIsClosed (tAction action)] + _ -> [] + where + Stats{readAfterWrite, rangeReadAfterWrite} = stats + + globalTags = mconcat [ + [ ReadAfterWrite + | readAfterWrite + ] + , [ RangeReadAfterWrite + | rangeReadAfterWrite + ] + ] + +{------------------------------------------------------------------------------- + Errors +-------------------------------------------------------------------------------} + +mkHandler :: + (IOLike m, Exception e) + => (e -> Maybe Err) + -> Handler m (Either Err a) +mkHandler fhandler = Handler $ + \e -> maybe (throwIO e) (return . Left) (fhandler e) + +-- | Map LMDB errors to mock errors. +fromDbErr :: LMDB.LMDBErr -> Maybe Err +fromDbErr = \case + LMDBErrNoDbState -> Nothing + LMDBErrNonMonotonicSeq wo wo' -> Just $ ErrNonMonotonicSeqNo wo wo' + LMDBErrInitialisingNonEmpty _ -> Nothing + LMDBErrNoValueHandle _ -> Just ErrBackingStoreValueHandleClosed + LMDBErrBadRead -> Nothing + LMDBErrBadRangeRead -> Nothing + LMDBErrDirExists _ -> Just ErrCopyPathAlreadyExists + LMDBErrDirDoesntExist _ -> Just ErrCopyPathDoesNotExist + LMDBErrDirIsNotLMDB _ -> Nothing + LMDBErrClosed -> Just ErrBackingStoreClosed + LMDBErrInitialisingAlreadyHasState -> Nothing + LMDBErrUnableToReadSeqNo -> Nothing + LMDBErrNotADir _ -> Nothing + +-- | Map InMemory (i.e., @TVarBackingStore@) errors to mock errors. 
+fromTVarExn :: BS.InMemoryBackingStoreExn -> Maybe Err +fromTVarExn = \case + BS.InMemoryBackingStoreClosedExn -> Just ErrBackingStoreClosed + BS.InMemoryBackingStoreValueHandleClosedExn -> Just ErrBackingStoreValueHandleClosed + BS.InMemoryBackingStoreDirectoryExists -> Just ErrCopyPathAlreadyExists + BS.InMemoryBackingStoreNonMonotonicSeq wo wo' -> Just $ ErrNonMonotonicSeqNo wo wo' + BS.InMemoryBackingStoreDeserialiseExn _ -> Nothing + BS.InMemoryIncompleteDeserialiseExn -> Nothing + +fromTVarExn' :: BS.InMemoryBackingStoreInitExn -> Maybe Err +fromTVarExn' = \case + BS.StoreDirIsIncompatible _ -> Just ErrCopyPathDoesNotExist diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Mock.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Mock.hs new file mode 100644 index 0000000000..b211d2a320 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Mock.hs @@ -0,0 +1,338 @@ +{-# LANGUAGE DeriveFunctor #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE LambdaCase #-} +{-# LANGUAGE MultiParamTypeClasses #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} + +module Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Mock ( + -- * Types + Err (..) + , ID (..) + , Mock (..) + , ValueHandle (..) + , ValueHandleStatus (..) + , emptyMock + -- * Type classes + , ApplyDiff (..) + , DiffSize (..) + , EmptyValues (..) + , HasOps + , KeysSize (..) + , LookupKeys (..) + , LookupKeysRange (..) + , MakeDiff (..) + , ValuesLength (..) + -- * State monad to run the mock in + , MockState (..) + , runMockState + -- * Mocked @'BackingStore'@ operations + , mBSClose + , mBSCopy + , mBSInitFromCopy + , mBSInitFromValues + , mBSVHAtSlot + , mBSVHClose + , mBSVHRangeRead + , mBSVHRead + , mBSVHStat + , mBSValueHandle + , mBSWrite + , mGuardBSClosed + , mGuardBSVHClosed + ) where + +import Control.Monad +import Control.Monad.Except (ExceptT (..), MonadError (throwError), + catchError, runExceptT) +import Control.Monad.State (MonadState, State, StateT (StateT), gets, + modify, runState) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Ouroboros.Consensus.Block.Abstract (SlotNo, WithOrigin (..)) +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.BackingStore as BS +import qualified System.FS.API.Types as FS + +{------------------------------------------------------------------------------- + Types +-------------------------------------------------------------------------------} + +data Mock vs = Mock { + backingValues :: vs + , backingSeqNo :: WithOrigin SlotNo + , copies :: Map FS.FsPath (WithOrigin SlotNo, vs) + , isClosed :: Bool + -- | Track whether value handles have been closed. + , valueHandles :: Map ID ValueHandleStatus + -- | The next id to use if a new value handle is opened. + , nextId :: ID + } + deriving stock (Show, Eq) + +data ValueHandleStatus = Open | ClosedByStore | ClosedByHandle + deriving stock (Show, Eq) + +data ValueHandle values = ValueHandle { + getId :: ID + , values :: values + , seqNo :: WithOrigin SlotNo + } + deriving stock Show + +instance Eq (ValueHandle vs) where + x == y = getId x == getId y + +instance Ord (ValueHandle vs) where + x <= y = getId x < getId y + +-- | An ID for a mocked value handle. 
+newtype ID = ID Word + deriving stock (Show, Eq, Ord) + deriving newtype Num + +-- | An empty mock state. +emptyMock :: EmptyValues vs => Mock vs +emptyMock = Mock { + backingValues = emptyValues + , backingSeqNo = Origin + , copies = Map.empty + , isClosed = False + , valueHandles = Map.empty + , nextId = 0 + } + +data Err = + ErrBackingStoreClosed + | ErrBackingStoreValueHandleClosed + | ErrCopyPathAlreadyExists + | ErrCopyPathDoesNotExist + | ErrNonMonotonicSeqNo (WithOrigin SlotNo) (WithOrigin SlotNo) + deriving stock (Show, Eq) + +{------------------------------------------------------------------------------- + Type classes +-------------------------------------------------------------------------------} + +-- | Abstract over interactions between values, keys and diffs. +class ( EmptyValues vs, ApplyDiff vs d, LookupKeysRange ks vs + , LookupKeys ks vs, ValuesLength vs, MakeDiff vs d + , DiffSize d, KeysSize ks + ) => HasOps ks vs d + +class EmptyValues vs where + emptyValues :: vs + +class ApplyDiff vs d where + applyDiff :: vs -> d -> vs + +class LookupKeysRange ks vs where + lookupKeysRange :: Maybe ks -> Int -> vs -> vs + +class LookupKeys ks vs where + lookupKeys :: ks -> vs -> vs + +class ValuesLength vs where + valuesLength :: vs -> Int + +class MakeDiff vs d where + diff :: vs -> vs -> d + +class DiffSize d where + diffSize :: d -> Int + +class KeysSize ks where + keysSize :: ks -> Int + +{------------------------------------------------------------------------------- + State monad to run the mock in +-------------------------------------------------------------------------------} + +-- | State within which the mock runs. +newtype MockState ks vs d a = + MockState (ExceptT Err (State (Mock vs)) a) + deriving stock Functor + deriving newtype ( Applicative + , Monad + , MonadState (Mock vs) + , MonadError Err + ) + +runMockState :: + MockState ks vs d a + -> Mock vs + -> (Either Err a, Mock vs) +runMockState (MockState t) = runState . runExceptT $ t + +{------------------------------------------------------------------------------ + Mocked @'BackingStore'@ operations +------------------------------------------------------------------------------} + +mBSInitFromValues :: + forall vs m. (MonadState (Mock vs) m) + => WithOrigin SlotNo + -> vs + -> m () +mBSInitFromValues sl vs = modify (\m -> m { + backingValues = vs + , backingSeqNo = sl + , isClosed = False + }) + +mBSInitFromCopy :: + forall vs m. (MonadState (Mock vs) m, MonadError Err m) + => FS.FsPath + -> m () +mBSInitFromCopy bsp = do + cps <- gets copies + case Map.lookup bsp cps of + Nothing -> throwError ErrCopyPathDoesNotExist + Just (sl, vs) -> modify (\m -> m { + backingValues = vs + , backingSeqNo = sl + , isClosed = False + }) + +-- | Throw an error if the backing store has been closed. +mGuardBSClosed :: (MonadState (Mock vs) m, MonadError Err m) => m () +mGuardBSClosed = do + closed <- gets isClosed + when closed $ + throwError ErrBackingStoreClosed + +-- | Close the backing store. +-- +-- Closing is idempotent. +mBSClose :: (MonadState (Mock vs) m, MonadError Err m) => m () +mBSClose = (mGuardBSClosed >> close) `catchError` handler + where + close = modify (\m -> m { + isClosed = True + , valueHandles = fmap (const ClosedByStore) (valueHandles m) + }) + handler = \case + ErrBackingStoreClosed -> pure () + e -> throwError e + +-- | Copy the contents of the backing store to the given path. 
+mBSCopy :: (MonadState (Mock vs) m, MonadError Err m) => FS.FsPath -> m () +mBSCopy bsp = do + mGuardBSClosed + cps <- gets copies + when (bsp `Map.member` cps) $ + throwError ErrCopyPathAlreadyExists + modify (\m -> m { + copies = Map.insert bsp (backingSeqNo m, backingValues m) (copies m) + }) + +-- | Open a new value handle, which captures the state of the backing store +-- at the time of opening the handle. +mBSValueHandle :: + (MonadState (Mock vs) m, MonadError Err m) + => m (ValueHandle vs) +mBSValueHandle = do + mGuardBSClosed + vs <- gets backingValues + seqNo <- gets backingSeqNo + nxt <- gets nextId + let + vh = ValueHandle nxt vs seqNo + modify (\m -> m { + valueHandles = Map.insert nxt Open (valueHandles m) + , nextId = nxt + 1 + }) + + pure vh + +-- | Write a diff to the backing store. +mBSWrite :: + (MonadState (Mock vs) m, MonadError Err m, ApplyDiff vs d) + => SlotNo + -> d + -> m () +mBSWrite sl d = do + mGuardBSClosed + vs <- gets backingValues + seqNo <- gets backingSeqNo + when (seqNo > NotOrigin sl) $ + throwError $ ErrNonMonotonicSeqNo (NotOrigin sl) seqNo + modify (\m -> m { + backingValues = applyDiff vs d + , backingSeqNo = NotOrigin sl + }) + +-- | Throw an error if the given backing store value handle has been closed. +mGuardBSVHClosed :: + (MonadState (Mock vs) m, MonadError Err m) + => ValueHandle vs + -> m () +mGuardBSVHClosed vh = do + vhs <- gets valueHandles + case Map.lookup (getId vh) vhs of + Nothing -> error "Value handle not found" + Just status -> + case status of + ClosedByStore -> throwError ErrBackingStoreClosed + ClosedByHandle -> throwError ErrBackingStoreValueHandleClosed + _ -> pure () + +-- | Close a backing store value handle. +-- +-- Closing is idempotent. +mBSVHClose :: + (MonadState (Mock vs) m, MonadError Err m) + => ValueHandle vs + -> m () +mBSVHClose vh = + (mGuardBSClosed >> mGuardBSVHClosed vh >> close) `catchError` handler + where + close = do + vhs <- gets valueHandles + modify (\m -> m { + valueHandles = Map.adjust (const ClosedByHandle) (getId vh) vhs + }) + handler = \case + ErrBackingStoreClosed -> pure () + ErrBackingStoreValueHandleClosed -> pure () + e -> throwError e + +-- | Perform a range read on a backing store value handle. +mBSVHRangeRead :: + (MonadState (Mock vs) m, MonadError Err m, LookupKeysRange ks vs) + => ValueHandle vs + -> BS.RangeQuery ks + -> m vs +mBSVHRangeRead vh BS.RangeQuery{BS.rqPrev, BS.rqCount} = do + mGuardBSClosed + mGuardBSVHClosed vh + let + vs = values vh + pure $ lookupKeysRange rqPrev rqCount vs + +-- | Perform a regular read on a backing store value handle +mBSVHRead :: + (MonadState (Mock vs) m, MonadError Err m, LookupKeys ks vs) + => ValueHandle vs + -> ks + -> m vs +mBSVHRead vh ks = do + mGuardBSClosed + mGuardBSVHClosed vh + let vs = values vh + pure $ lookupKeys ks vs + +-- | Read the slot number out of a value handle +mBSVHAtSlot :: Monad m => ValueHandle vs -> m (WithOrigin SlotNo) +mBSVHAtSlot = pure . seqNo + +-- | Retrieve statistics for the backing store value handle. 
+mBSVHStat :: + (MonadState (Mock vs) m, MonadError Err m, ValuesLength vs) + => ValueHandle vs + -> m BS.Statistics +mBSVHStat vh = do + mGuardBSClosed + mGuardBSVHClosed vh + pure $ BS.Statistics (seqNo vh) (valuesLength $ values vh) diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Registry.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Registry.hs new file mode 100644 index 0000000000..a2881d38c2 --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/BackingStore/Registry.hs @@ -0,0 +1,62 @@ +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE GeneralisedNewtypeDeriving #-} +{-# LANGUAGE NamedFieldPuns #-} +{-# LANGUAGE Rank2Types #-} +{-# LANGUAGE ScopedTypeVariables #-} + +-- | A utility for storing and retrieving resources in a registry using handles +-- to identify resources in the registry. +module Test.Ouroboros.Storage.LedgerDB.V1.BackingStore.Registry ( + Handle + , HandleRegistry + , initHandleRegistry + , readHandle + , registerHandle + ) where + +import Control.Monad.Class.MonadSTM.Internal as STM + (MonadSTM (TVar, atomically, newTVarIO, readTVar, writeTVar)) +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Ouroboros.Consensus.Util.IOLike (IOLike) + +newtype Handle = Handle Word + deriving stock (Show, Eq, Ord) + deriving newtype Num + +data HandleRegistry m a = HandleRegistry { + handles :: TVar m (Map Handle a) + , nextHandle :: TVar m Handle + } + +initHandleRegistry :: IOLike m => m (HandleRegistry m a) +initHandleRegistry = do + handles <- STM.newTVarIO Map.empty + nextHandle <- STM.newTVarIO 0 + pure $ HandleRegistry { handles, nextHandle } + +registerHandle :: + IOLike m + => HandleRegistry m a + -> a + -> m Handle +registerHandle HandleRegistry{handles, nextHandle} bsvh = STM.atomically $ do + vhs <- STM.readTVar handles + nh <- STM.readTVar nextHandle + let + vhs' = Map.insert nh bsvh vhs + STM.writeTVar handles vhs' + STM.writeTVar nextHandle (nh + 1) + pure nh + +readHandle :: + IOLike m + => HandleRegistry m a + -> Handle + -> m a +readHandle HandleRegistry{handles} h = STM.atomically $ do + vhs <- STM.readTVar handles + case Map.lookup h vhs of + Nothing -> error "Handle not found" + Just vh -> pure vh diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/InMemory.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/QuickCheck.hs similarity index 71% rename from ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/InMemory.hs rename to ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/QuickCheck.hs index 2904da9c0a..fb98793e7a 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/InMemory.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/QuickCheck.hs @@ -4,7 +4,6 @@ {-# LANGUAGE NamedFieldPuns #-} {-# LANGUAGE RecordWildCards #-} {-# LANGUAGE ScopedTypeVariables #-} -{-# LANGUAGE TypeApplications #-} {-# LANGUAGE UndecidableInstances #-} {-# OPTIONS_GHC -Wno-orphans #-} @@ -25,11 +24,8 @@ -- * The maximum rollback supported is always @k@ (unless we are near genesis) -- * etc. 
-- -module Test.Ouroboros.Storage.LedgerDB.InMemory (tests) where +module Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.QuickCheck (tests) where -import Codec.CBOR.FlatTerm (FlatTerm, TermToken (..), fromFlatTerm, - toFlatTerm) -import Codec.Serialise (decode, encode) import Data.Maybe (fromJust) import Data.Word import Ouroboros.Consensus.Block @@ -37,24 +33,20 @@ import Ouroboros.Consensus.BlockchainTime import Ouroboros.Consensus.Config import qualified Ouroboros.Consensus.HardFork.History as HardFork import Ouroboros.Consensus.Ledger.Abstract -import Ouroboros.Consensus.Storage.LedgerDB +import Ouroboros.Consensus.Storage.LedgerDB.API.Config +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog hiding + (tip) import Ouroboros.Consensus.Util -import Test.Ouroboros.Storage.LedgerDB.OrphanArbitrary () import Test.QuickCheck import Test.Tasty -import Test.Tasty.HUnit import Test.Tasty.QuickCheck +import Test.Util.Orphans.Arbitrary () import Test.Util.QuickCheck import Test.Util.TestBlock tests :: TestTree tests = testGroup "InMemory" [ - testGroup "Serialisation" [ - testCase "encode" test_encode_ledger - , testCase "decode" test_decode_ledger - , testCase "decode ChainSummary" test_decode_ChainSummary - ] - , testGroup "Genesis" [ + testGroup "Genesis" [ testProperty "current" prop_genesisCurrent ] , testGroup "Push" [ @@ -70,67 +62,15 @@ tests = testGroup "InMemory" [ ] ] -{------------------------------------------------------------------------------- - Serialisation --------------------------------------------------------------------------------} - --- | The LedgerDB is parametric in the ledger @l@. We use @Int@ for simplicity. -example_ledger :: Int -example_ledger = 100 - -golden_ledger :: FlatTerm -golden_ledger = - [ TkListLen 2 - -- VersionNumber - , TkInt 1 - -- ledger: Int - , TkInt 100 - ] - --- | The old format based on the @ChainSummary@. To remain backwards compatible --- we still accept this old format. 
-golden_ChainSummary :: FlatTerm -golden_ChainSummary = - [ TkListLen 3 - -- tip: WithOrigin (RealPoint TestBlock) - , TkListLen 1 - , TkListLen 2 - , TkInt 3 - , TkListBegin, TkInt 0, TkInt 0, TkBreak - -- chain length: Word64 - , TkInt 10 - -- ledger: Int for simplicity - , TkInt 100 - ] - -test_encode_ledger :: Assertion -test_encode_ledger = - toFlatTerm (enc example_ledger) @?= golden_ledger - where - enc = encodeSnapshot encode - -test_decode_ledger :: Assertion -test_decode_ledger = - fromFlatTerm dec golden_ledger @?= Right example_ledger - where - dec = decodeSnapshotBackwardsCompatible (Proxy @TestBlock) decode decode - --- | For backwards compatibility -test_decode_ChainSummary :: Assertion -test_decode_ChainSummary = - fromFlatTerm dec golden_ChainSummary @?= Right example_ledger - where - dec = decodeSnapshotBackwardsCompatible (Proxy @TestBlock) decode decode - {------------------------------------------------------------------------------- Genesis -------------------------------------------------------------------------------} prop_genesisCurrent :: Property prop_genesisCurrent = - ledgerDbCurrent genSnaps === testInitLedger + current genSnaps === convertMapKind testInitLedger where - genSnaps = ledgerDbWithAnchor testInitLedger + genSnaps = anchorlessChangelog $ empty (convertMapKind testInitLedger) {------------------------------------------------------------------------------- Constructing snapshots @@ -140,8 +80,8 @@ prop_pushExpectedLedger :: ChainSetup -> Property prop_pushExpectedLedger setup@ChainSetup{..} = classify (chainSetupSaturated setup) "saturated" $ conjoin [ - l === refoldLedger cfg (expectedChain o) testInitLedger - | (o, l) <- ledgerDbSnapshots csPushed + l === convertMapKind (refoldLedger cfg (expectedChain o) (convertMapKind testInitLedger)) + | (o, l) <- snapshots csPushed ] where expectedChain :: Word64 -> [TestBlock] @@ -154,9 +94,9 @@ prop_pastLedger :: ChainSetup -> Property prop_pastLedger setup@ChainSetup{..} = classify (chainSetupSaturated setup) "saturated" $ classify withinReach "within reach" $ - ledgerDbPast tip csPushed + getPastLedgerAt tip csPushed === if withinReach - then Just (ledgerDbCurrent afterPrefix) + then Just (current afterPrefix) else Nothing where prefix :: [TestBlock] @@ -165,12 +105,12 @@ prop_pastLedger setup@ChainSetup{..} = tip :: Point TestBlock tip = maybe GenesisPoint blockPoint (lastMaybe prefix) - afterPrefix :: LedgerDB (LedgerState TestBlock) - afterPrefix = ledgerDbPushMany' (csBlockConfig setup) prefix csGenSnaps + afterPrefix :: AnchorlessDbChangelog (LedgerState TestBlock) + afterPrefix = reapplyThenPushMany' (csBlockConfig setup) prefix trivialKeySetsReader csGenSnaps -- See 'prop_snapshotsMaxRollback' withinReach :: Bool - withinReach = (csNumBlocks - csPrefixLen) <= ledgerDbMaxRollback csPushed + withinReach = (csNumBlocks - csPrefixLen) <= maxRollback csPushed {------------------------------------------------------------------------------- Rollback @@ -178,7 +118,7 @@ prop_pastLedger setup@ChainSetup{..} = prop_maxRollbackGenesisZero :: Property prop_maxRollbackGenesisZero = - ledgerDbMaxRollback (ledgerDbWithAnchor testInitLedger) + maxRollback (anchorlessChangelog $ empty (convertMapKind testInitLedger)) === 0 prop_snapshotsMaxRollback :: ChainSetup -> Property @@ -186,9 +126,9 @@ prop_snapshotsMaxRollback setup@ChainSetup{..} = classify (chainSetupSaturated setup) "saturated" $ conjoin [ if chainSetupSaturated setup - then (ledgerDbMaxRollback csPushed) `ge` k - else (ledgerDbMaxRollback csPushed) `ge` 
(min k csNumBlocks) - , (ledgerDbMaxRollback csPushed) `le` k + then (maxRollback csPushed) `ge` k + else (maxRollback csPushed) `ge` (min k csNumBlocks) + , (maxRollback csPushed) `le` k ] where SecurityParam k = csSecParam @@ -196,7 +136,7 @@ prop_snapshotsMaxRollback setup@ChainSetup{..} = prop_switchSameChain :: SwitchSetup -> Property prop_switchSameChain setup@SwitchSetup{..} = classify (switchSetupSaturated setup) "saturated" $ - ledgerDbSwitch' (csBlockConfig ssChainSetup) ssNumRollback blockInfo csPushed + switch' (csBlockConfig ssChainSetup) ssNumRollback blockInfo trivialKeySetsReader csPushed === Just csPushed where ChainSetup{csPushed} = ssChainSetup @@ -206,8 +146,8 @@ prop_switchExpectedLedger :: SwitchSetup -> Property prop_switchExpectedLedger setup@SwitchSetup{..} = classify (switchSetupSaturated setup) "saturated" $ conjoin [ - l === refoldLedger cfg (expectedChain o) testInitLedger - | (o, l) <- ledgerDbSnapshots ssSwitched + l === convertMapKind (refoldLedger cfg (expectedChain o) (convertMapKind testInitLedger)) + | (o, l) <- snapshots ssSwitched ] where expectedChain :: Word64 -> [TestBlock] @@ -221,9 +161,9 @@ prop_pastAfterSwitch :: SwitchSetup -> Property prop_pastAfterSwitch setup@SwitchSetup{..} = classify (switchSetupSaturated setup) "saturated" $ classify withinReach "within reach" $ - ledgerDbPast tip ssSwitched + getPastLedgerAt tip ssSwitched === if withinReach - then Just (ledgerDbCurrent afterPrefix) + then Just (current afterPrefix) else Nothing where prefix :: [TestBlock] @@ -232,12 +172,12 @@ prop_pastAfterSwitch setup@SwitchSetup{..} = tip :: Point TestBlock tip = maybe GenesisPoint blockPoint (lastMaybe prefix) - afterPrefix :: LedgerDB (LedgerState TestBlock) - afterPrefix = ledgerDbPushMany' (csBlockConfig ssChainSetup) prefix (csGenSnaps ssChainSetup) + afterPrefix :: AnchorlessDbChangelog (LedgerState TestBlock) + afterPrefix = reapplyThenPushMany' (csBlockConfig ssChainSetup) prefix trivialKeySetsReader (csGenSnaps ssChainSetup) -- See 'prop_snapshotsMaxRollback' withinReach :: Bool - withinReach = (ssNumBlocks - ssPrefixLen) <= ledgerDbMaxRollback ssSwitched + withinReach = (ssNumBlocks - ssPrefixLen) <= maxRollback ssSwitched {------------------------------------------------------------------------------- Test setup @@ -259,13 +199,13 @@ data ChainSetup = ChainSetup { , csPrefixLen :: Word64 -- | Derived: genesis snapshots - , csGenSnaps :: LedgerDB (LedgerState TestBlock) + , csGenSnaps :: AnchorlessDbChangelog (LedgerState TestBlock) -- | Derived: the actual blocks that got applied (old to new) , csChain :: [TestBlock] -- | Derived: the snapshots after all blocks were applied - , csPushed :: LedgerDB (LedgerState TestBlock) + , csPushed :: AnchorlessDbChangelog (LedgerState TestBlock) } deriving (Show) @@ -283,7 +223,7 @@ csBlockConfig' secParam = LedgerDbCfg { slotLength = slotLengthFromSec 20 chainSetupSaturated :: ChainSetup -> Bool -chainSetupSaturated ChainSetup{..} = ledgerDbIsSaturated csSecParam csPushed +chainSetupSaturated ChainSetup{..} = isSaturated csSecParam csPushed data SwitchSetup = SwitchSetup { -- | Chain setup @@ -313,7 +253,7 @@ data SwitchSetup = SwitchSetup { , ssChain :: [TestBlock] -- | Derived; the snapshots after the switch was performed - , ssSwitched :: LedgerDB (LedgerState TestBlock) + , ssSwitched :: AnchorlessDbChangelog (LedgerState TestBlock) } deriving (Show) @@ -324,10 +264,10 @@ mkTestSetup :: SecurityParam -> Word64 -> Word64 -> ChainSetup mkTestSetup csSecParam csNumBlocks csPrefixLen = ChainSetup {..} 
where - csGenSnaps = ledgerDbWithAnchor testInitLedger + csGenSnaps = anchorlessChangelog $ empty (convertMapKind testInitLedger) csChain = take (fromIntegral csNumBlocks) $ iterate successorBlock (firstBlock 0) - csPushed = ledgerDbPushMany' (csBlockConfig' csSecParam) csChain csGenSnaps + csPushed = reapplyThenPushMany' (csBlockConfig' csSecParam) csChain trivialKeySetsReader csGenSnaps mkRollbackSetup :: ChainSetup -> Word64 -> Word64 -> Word64 -> SwitchSetup mkRollbackSetup ssChainSetup ssNumRollback ssNumNew ssPrefixLen = @@ -348,7 +288,7 @@ mkRollbackSetup ssChainSetup ssNumRollback ssNumNew ssPrefixLen = take (fromIntegral (csNumBlocks - ssNumRollback)) csChain , ssNewBlocks ] - ssSwitched = fromJust $ ledgerDbSwitch' (csBlockConfig ssChainSetup) ssNumRollback ssNewBlocks csPushed + ssSwitched = fromJust $ switch' (csBlockConfig ssChainSetup) ssNumRollback ssNewBlocks trivialKeySetsReader csPushed instance Arbitrary ChainSetup where arbitrary = do @@ -373,7 +313,7 @@ instance Arbitrary ChainSetup where instance Arbitrary SwitchSetup where arbitrary = do chainSetup <- arbitrary - numRollback <- choose (0, ledgerDbMaxRollback (csPushed chainSetup)) + numRollback <- choose (0, maxRollback (csPushed chainSetup)) numNew <- choose (numRollback, 2 * numRollback) prefixLen <- choose (0, csNumBlocks chainSetup - numRollback + numNew) return $ mkRollbackSetup chainSetup numRollback numNew prefixLen @@ -382,7 +322,7 @@ instance Arbitrary SwitchSetup where -- If we shrink the chain setup, we might restrict max rollback [ mkRollbackSetup ssChainSetup' ssNumRollback ssNumNew ssPrefixLen | ssChainSetup' <- shrink ssChainSetup - , ssNumRollback <= ledgerDbMaxRollback (csPushed ssChainSetup') + , ssNumRollback <= maxRollback (csPushed ssChainSetup') ] -- Number of new blocks must be at least the rollback , [ mkRollbackSetup ssChainSetup ssNumRollback ssNumNew' ssPrefixLen diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/Unit.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/Unit.hs new file mode 100644 index 0000000000..639f5c2aac --- /dev/null +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/LedgerDB/V1/DbChangelog/Unit.hs @@ -0,0 +1,339 @@ +{-# LANGUAGE DataKinds #-} +{-# LANGUAGE DeriveAnyClass #-} +{-# LANGUAGE DeriveGeneric #-} +{-# LANGUAGE DerivingStrategies #-} +{-# LANGUAGE FlexibleContexts #-} +{-# LANGUAGE FlexibleInstances #-} +{-# LANGUAGE PatternSynonyms #-} +{-# LANGUAGE ScopedTypeVariables #-} +{-# LANGUAGE StandaloneDeriving #-} +{-# LANGUAGE TypeFamilies #-} +{-# LANGUAGE UndecidableInstances #-} +{-# OPTIONS_GHC -Wno-orphans #-} + +module Test.Ouroboros.Storage.LedgerDB.V1.DbChangelog.Unit (tests) where + +import Cardano.Slotting.Slot (WithOrigin (..), withOrigin) +import Control.Monad hiding (ap) +import Control.Monad.Trans.Class (lift) +import Control.Monad.Trans.State.Strict hiding (state) +import Data.Foldable +import qualified Data.Map.Diff.Strict.Internal as Diff +import Data.Map.Strict (Map) +import qualified Data.Map.Strict as Map +import Data.Maybe (catMaybes, fromJust, isJust, isNothing) +import Data.Set (Set) +import qualified Data.Set as Set +import GHC.Generics (Generic) +import NoThunks.Class (NoThunks) +import Ouroboros.Consensus.Config.SecurityParam (SecurityParam (..)) +import Ouroboros.Consensus.Ledger.Basics hiding (Key, LedgerState) +import qualified Ouroboros.Consensus.Ledger.Basics as Ledger +import Ouroboros.Consensus.Ledger.Tables.Diff (fromAntiDiff) 
+import Ouroboros.Consensus.Ledger.Tables.DiffSeq as DS +import Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog + (DbChangelog (..)) +import qualified Ouroboros.Consensus.Storage.LedgerDB.V1.DbChangelog as DbChangelog +import qualified Ouroboros.Network.AnchoredSeq as AS +import Ouroboros.Network.Block (HeaderHash, Point (..), SlotNo (..), + StandardHash, castPoint, pattern GenesisPoint) +import qualified Ouroboros.Network.Point as Point +import Test.QuickCheck +import Test.Tasty (TestTree, testGroup) +import Test.Tasty.QuickCheck (testProperty) +import Test.Util.Orphans.Arbitrary () +import Test.Util.QuickCheck (frequency', oneof') +import Text.Show.Pretty (ppShow) + +samples :: Int +samples = 1000 + +tests :: TestTree +tests = testGroup "DbChangelog" + [ testProperty "flushing" $ verboseShrinking $ withMaxSuccess samples $ conjoin + [ counterexample "flushing keeps immutable tip" + prop_flushingSplitsTheChangelog + ] + , testProperty "rolling back" $ withMaxSuccess samples $ conjoin + [ counterexample "rollback after extension is noop" + prop_rollbackAfterExtendIsNoop + , counterexample "prefixing back to anchor is rolling back volatile states" + prop_prefixBackToAnchorIsRollingBackVolatileStates + , counterexample "prefix back to volatile tip is a noop" + prop_rollBackToVolatileTipIsNoop + ] + , testProperty "extending adds head to volatile states" + $ withMaxSuccess samples prop_extendingAdvancesTipOfVolatileStates + , testProperty "pruning leaves at most maxRollback volatile states" + $ withMaxSuccess samples prop_pruningLeavesAtMostMaxRollbacksVolatileStates + ] + + + +{------------------------------------------------------------------------------- + Test setup +-------------------------------------------------------------------------------} + +data TestLedger (mk :: MapKind) = TestLedger { + tlUtxos :: mk Key Int, + tlTip :: Point TestLedger +} + +nextState :: DbChangelog TestLedger -> TestLedger DiffMK +nextState dblog = TestLedger { + tlTip = pointAtSlot $ nextSlot (getTipSlot old) + , tlUtxos = DiffMK mempty + } + where + old = DbChangelog.current $ anchorlessChangelog dblog + nextSlot = At . withOrigin 1 (+1) + + +deriving instance Show (mk Key Int) => Show (TestLedger mk) + +instance GetTip TestLedger where + getTip = castPoint . tlTip + +data H = H deriving (Eq, Ord, Show, Generic) +deriving anyclass instance NoThunks H +type instance HeaderHash TestLedger = H + +instance StandardHash TestLedger + +deriving instance Eq (TestLedger EmptyMK) + +type instance Ledger.Key TestLedger = Key +type instance Ledger.Value TestLedger = Int + +instance HasLedgerTables TestLedger where + projectLedgerTables = LedgerTables . tlUtxos + withLedgerTables st (LedgerTables x) = st { tlUtxos = x } + +data DbChangelogTestSetup = DbChangelogTestSetup { + -- The operations are applied on the right, i.e., the newest operation is at the head of the list. + operations :: [Operation TestLedger] + , dbChangelogStartsAt :: WithOrigin SlotNo + } + +data Operation l = Extend (l DiffMK) | Prune SecurityParam +deriving instance Show (l DiffMK) => Show (Operation l) + +data DbChangelogTestSetupWithRollbacks = DbChangelogTestSetupWithRollbacks + { testSetup :: DbChangelogTestSetup + , rollbacks :: Int + } deriving (Show) + +instance Show DbChangelogTestSetup where + show = ppShow . operations + +instance Arbitrary DbChangelogTestSetup where + arbitrary = sized $ \n -> do + slotNo <- oneof [pure Origin, At . 
SlotNo <$> chooseEnum (1, 1000)] + ops <- genOperations slotNo n + pure $ DbChangelogTestSetup + { operations = ops + , dbChangelogStartsAt = slotNo + } + + -- TODO: Shrinking might not be optimal. Shrinking finds the shortest prefix of the list of + -- operations that result in a failed property, by simply testing prefixes in increasing order. + shrink setup = reverse $ takeWhileJust $ drop 1 (iterate reduce (Just setup)) + where + reduce (Just (DbChangelogTestSetup (_:ops) dblog)) = Just $ DbChangelogTestSetup ops dblog + reduce _ = Nothing + takeWhileJust = catMaybes . takeWhile isJust + +instance Arbitrary DbChangelogTestSetupWithRollbacks where + arbitrary = do + setup <- arbitrary + let dblog = resultingDbChangelog setup + rolls <- chooseInt (0, AS.length (DbChangelog.adcStates $ DbChangelog.anchorlessChangelog dblog)) + pure $ DbChangelogTestSetupWithRollbacks + { testSetup = setup + , rollbacks = rolls + } + + shrink setupWithRollback = toWithRollbacks <$> setups + where + setups = shrink (testSetup setupWithRollback) + shrinkRollback :: DbChangelogTestSetup -> Int -> Int + shrinkRollback setup rollback = + AS.length (DbChangelog.adcStates $ DbChangelog.anchorlessChangelog $ resultingDbChangelog setup) `min` rollback + toWithRollbacks setup = DbChangelogTestSetupWithRollbacks { + testSetup = setup + , rollbacks = shrinkRollback setup (rollbacks setupWithRollback) + } + +resultingDbChangelog :: DbChangelogTestSetup -> DbChangelog TestLedger +resultingDbChangelog setup = applyOperations (operations setup) originalDbChangelog + where + originalDbChangelog = DbChangelog.empty $ TestLedger EmptyMK anchor + anchor = pointAtSlot (dbChangelogStartsAt setup) + +applyOperations :: (HasLedgerTables l, GetTip l) + => [Operation l] -> DbChangelog l -> DbChangelog l +applyOperations ops dblog = foldr' apply' dblog ops + where apply' (Extend newState) dblog' = DbChangelog.onChangelog (DbChangelog.extend newState) dblog' + apply' (Prune sp) dblog' = DbChangelog.onChangelog (DbChangelog.prune sp) dblog' + +{------------------------------------------------------------------------------- + Properties +-------------------------------------------------------------------------------} + +-- | Changelog states and diffs appear in one either the changelog to flush or the changelog to +-- keep, moreover, the to flush changelog has no volatile states, and the to keep changelog has no +-- immutable states. +prop_flushingSplitsTheChangelog :: DbChangelogTestSetup -> Property +prop_flushingSplitsTheChangelog setup = isNothing toFlush .||. + ( toKeepTip === At toFlushTip + .&&. fromAntiDiff (cumulativeDiff diffs) === toFlushDiffs <> fromAntiDiff (cumulativeDiff toKeepDiffs) + ) + where + dblog = resultingDbChangelog setup + (toFlush, toKeep) = DbChangelog.splitForFlushing dblog + toFlushTip = maybe undefined DbChangelog.toFlushSlot toFlush + toKeepTip = DbChangelog.immutableTipSlot $ anchorlessChangelog toKeep + LedgerTables (SeqDiffMK toKeepDiffs) = DbChangelog.adcDiffs $ anchorlessChangelog toKeep + LedgerTables (DiffMK toFlushDiffs) = maybe undefined DbChangelog.toFlushDiffs toFlush + LedgerTables (SeqDiffMK diffs) = DbChangelog.adcDiffs $ anchorlessChangelog dblog + +-- | Extending the changelog adds the correct head to the volatile states. 
+prop_extendingAdvancesTipOfVolatileStates :: DbChangelogTestSetup -> Property +prop_extendingAdvancesTipOfVolatileStates setup = + property $ tlTip state == tlTip new + where + dblog = resultingDbChangelog setup + state = nextState dblog + dblog' = DbChangelog.onChangelog (DbChangelog.extend state) dblog + new = AS.headAnchor (DbChangelog.adcStates $ anchorlessChangelog dblog') + +-- | Rolling back n extensions is the same as doing nothing. +prop_rollbackAfterExtendIsNoop :: DbChangelogTestSetup -> Positive Int -> Property +prop_rollbackAfterExtendIsNoop setup (Positive n) = + property (dblog == fromJust (DbChangelog.onChangelogM (DbChangelog.rollbackN (fromIntegral n)) $ nExtensions n dblog)) + where + dblog = resultingDbChangelog setup + +-- | The number of volatile states left after pruning is at most the maximum number of rollbacks. +prop_pruningLeavesAtMostMaxRollbacksVolatileStates :: + DbChangelogTestSetup -> SecurityParam -> Property +prop_pruningLeavesAtMostMaxRollbacksVolatileStates setup sp@(SecurityParam k) = + property $ AS.length (DbChangelog.adcStates $ anchorlessChangelog dblog') <= fromIntegral k + where + dblog = resultingDbChangelog setup + dblog' = DbChangelog.onChangelog (DbChangelog.prune sp) dblog + +-- | The prefixBackToAnchor function rolls back all volatile states. +prop_prefixBackToAnchorIsRollingBackVolatileStates :: DbChangelogTestSetup -> Property +prop_prefixBackToAnchorIsRollingBackVolatileStates setup = + property $ rolledBack == toAnchor + where + dblog = resultingDbChangelog setup + n = AS.length (DbChangelog.adcStates $ anchorlessChangelog dblog) + rolledBack = fromJust $ DbChangelog.onChangelogM (DbChangelog.rollbackN (fromIntegral n)) dblog + toAnchor = DbChangelog.onChangelog DbChangelog.rollbackToAnchor dblog + +-- | Rolling back to the last state is the same as doing nothing. +prop_rollBackToVolatileTipIsNoop :: + Positive Int -> DbChangelogTestSetup -> Property +prop_rollBackToVolatileTipIsNoop (Positive n) setup = property $ Just dblog == dblog' + where + dblog = resultingDbChangelog setup + pt = getTip $ DbChangelog.current $ anchorlessChangelog dblog + dblog' = DbChangelog.onChangelogM (DbChangelog.rollbackToPoint pt) $ nExtensions n dblog + +nExtensions :: Int -> DbChangelog TestLedger -> DbChangelog TestLedger +nExtensions n dblog = iterate ext dblog !! 
n + where ext dblog' = DbChangelog.onChangelog (DbChangelog.extend (nextState dblog')) dblog' + +{------------------------------------------------------------------------------- + Generators +-------------------------------------------------------------------------------} + +pointAtSlot :: WithOrigin SlotNo -> Point TestLedger +pointAtSlot = Point.withOrigin GenesisPoint (\slotNo -> Point $ At $ Point.Block slotNo H) + +type Key = String + +data GenOperationsState = GenOperationsState { + gosSlotNo :: !(WithOrigin SlotNo) + , gosOps :: ![Operation TestLedger] + , gosActiveUtxos :: !(Map Key Int) + , gosPendingInsertions :: !(Map Key Int) + , gosConsumedUtxos :: !(Set Key) + } deriving (Show) + +applyPending :: GenOperationsState -> GenOperationsState +applyPending gosState = gosState + { gosActiveUtxos = Map.union (gosActiveUtxos gosState) (gosPendingInsertions gosState) + , gosPendingInsertions = Map.empty + } + +genOperations :: WithOrigin SlotNo -> Int -> Gen [Operation TestLedger] +genOperations slotNo nOps = gosOps <$> execStateT (replicateM_ nOps genOperation) initState + where + initState = GenOperationsState { + gosSlotNo = slotNo + , gosActiveUtxos = Map.empty + , gosPendingInsertions = Map.empty + , gosConsumedUtxos = Set.empty + , gosOps = [] + } + + genOperation :: StateT GenOperationsState Gen () + genOperation = do + op <- frequency' [ (1, genPrune), (10, genExtend) ] + modify' $ \st -> st { gosOps = op:gosOps st } + + genPrune :: StateT GenOperationsState Gen (Operation TestLedger) + genPrune = Prune . SecurityParam <$> lift (chooseEnum (0, 10)) + + genExtend :: StateT GenOperationsState Gen (Operation TestLedger) + genExtend = do + nextSlotNo <- advanceSlotNo =<< lift (chooseEnum (1, 5)) + d <- genUtxoDiff + pure $ Extend $ TestLedger (DiffMK $ fromAntiDiff d) (castPoint $ pointAtSlot nextSlotNo) + + advanceSlotNo :: SlotNo -> StateT GenOperationsState Gen (WithOrigin SlotNo) + advanceSlotNo by = do + nextSlotNo <- gets (At . Point.withOrigin by (+ by) . 
gosSlotNo) + modify' $ \st -> st { gosSlotNo = nextSlotNo } + pure nextSlotNo + + genUtxoDiff :: StateT GenOperationsState Gen (Diff.Diff Key Int) + genUtxoDiff = do + nEntries <- lift $ chooseInt (1, 10) + entries <- replicateM nEntries genUtxoDiffEntry + modify' applyPending + pure $ Diff.fromList entries + + genUtxoDiffEntry :: StateT GenOperationsState Gen (Key, Diff.Delta Int) + genUtxoDiffEntry = do + activeUtxos <- gets gosActiveUtxos + consumedUtxos <- gets gosConsumedUtxos + oneof' $ catMaybes [ + genDelEntry activeUtxos, + genInsertEntry consumedUtxos] + + genDelEntry :: Map Key Int -> Maybe (StateT GenOperationsState Gen (Key, Diff.Delta Int)) + genDelEntry activeUtxos = + if Map.null activeUtxos then Nothing + else Just $ do + (k, _) <- lift $ elements (Map.toList activeUtxos) + modify' $ \st -> st + { gosActiveUtxos = Map.delete k (gosActiveUtxos st) + } + pure (k, Diff.Delete) + + genInsertEntry :: Set Key -> Maybe (StateT GenOperationsState Gen (Key, Diff.Delta Int)) + genInsertEntry consumedUtxos = Just $ do + k <- lift $ genKey `suchThat` (`Set.notMember` consumedUtxos) + v <- lift arbitrary + modify' $ \st -> st + { gosPendingInsertions = Map.insert k v (gosPendingInsertions st) + , gosConsumedUtxos = Set.insert k (gosConsumedUtxos st) + } + pure (k, Diff.Insert v) + +genKey :: Gen Key +genKey = replicateM 2 $ elements ['A'..'Z'] diff --git a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/TestBlock.hs b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/TestBlock.hs index 979dabf525..6ea3925d9d 100644 --- a/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/TestBlock.hs +++ b/ouroboros-consensus/test/storage-test/Test/Ouroboros/Storage/TestBlock.hs @@ -79,6 +79,7 @@ import qualified Data.Map.Strict as Map import Data.Maybe (maybeToList) import Data.TreeDiff import Data.Typeable (Typeable) +import Data.Void (Void) import Data.Word import GHC.Generics (Generic) import GHC.Stack (HasCallStack) @@ -96,6 +97,7 @@ import Ouroboros.Consensus.Ledger.Abstract import Ouroboros.Consensus.Ledger.Extended import Ouroboros.Consensus.Ledger.Inspect import Ouroboros.Consensus.Ledger.SupportsProtocol +import Ouroboros.Consensus.Ledger.Tables.Utils import Ouroboros.Consensus.Node.ProtocolInfo import Ouroboros.Consensus.Node.Run import Ouroboros.Consensus.NodeId @@ -547,7 +549,7 @@ type instance LedgerCfg (LedgerState TestBlock) = HardFork.EraParams instance GetTip (LedgerState TestBlock) where getTip = castPoint . lastAppliedPoint -instance GetTip (Ticked (LedgerState TestBlock)) where +instance GetTip (Ticked1 (LedgerState TestBlock)) where getTip = castPoint . getTip . getTickedTestLedger instance IsLedger (LedgerState TestBlock) where @@ -556,7 +558,24 @@ instance IsLedger (LedgerState TestBlock) where type AuxLedgerEvent (LedgerState TestBlock) = VoidLedgerEvent (LedgerState TestBlock) - applyChainTickLedgerResult _ _ = pureLedgerResult . TickedTestLedger + applyChainTickLedgerResult _ _ = pureLedgerResult + . TickedTestLedger + . 
noNewTickingDiffs + +type instance Key (LedgerState TestBlock) = Void +type instance Value (LedgerState TestBlock) = Void + +instance HasLedgerTables (LedgerState TestBlock) +instance HasLedgerTables (Ticked1 (LedgerState TestBlock)) + +instance CanSerializeLedgerTables (LedgerState TestBlock) where + +instance CanStowLedgerTables (LedgerState TestBlock) where + +instance LedgerTablesAreTrivial (LedgerState TestBlock) where + convertMapKind (TestLedger x y) = TestLedger x y +instance LedgerTablesAreTrivial (Ticked1 (LedgerState TestBlock)) where + convertMapKind (TickedTestLedger x) = TickedTestLedger (convertMapKind x) instance ApplyBlock (LedgerState TestBlock) TestBlock where applyBlockLedgerResult _ tb@TestBlock{..} (TickedTestLedger TestLedger{..}) @@ -570,7 +589,9 @@ instance ApplyBlock (LedgerState TestBlock) TestBlock where reapplyBlockLedgerResult _ tb _ = pureLedgerResult $ TestLedger (Chain.blockPoint tb) (BlockHash (blockHash tb)) -data instance LedgerState TestBlock = + getBlockKeySets _blk = trivialLedgerTables + +data instance LedgerState TestBlock mk = TestLedger { -- The ledger state simply consists of the last applied block lastAppliedPoint :: !(Point TestBlock) @@ -580,8 +601,8 @@ data instance LedgerState TestBlock = deriving anyclass (Serialise, NoThunks) -- Ticking has no effect on the test ledger state -newtype instance Ticked (LedgerState TestBlock) = TickedTestLedger { - getTickedTestLedger :: LedgerState TestBlock +newtype instance Ticked1 (LedgerState TestBlock) mk = TickedTestLedger { + getTickedTestLedger :: LedgerState TestBlock mk } instance UpdateLedger TestBlock @@ -648,10 +669,10 @@ instance HasHardForkHistory TestBlock where instance InspectLedger TestBlock where -- Use defaults -testInitLedger :: LedgerState TestBlock +testInitLedger :: LedgerState TestBlock EmptyMK testInitLedger = TestLedger GenesisPoint GenesisHash -testInitExtLedger :: ExtLedgerState TestBlock +testInitExtLedger :: ExtLedgerState TestBlock EmptyMK testInitExtLedger = ExtLedgerState { ledgerState = testInitLedger , headerState = genesisHeaderState () @@ -730,8 +751,8 @@ instance EncodeDisk TestBlock (Header TestBlock) instance DecodeDisk TestBlock (Lazy.ByteString -> Header TestBlock) where decodeDisk _ = const <$> decode -instance EncodeDisk TestBlock (LedgerState TestBlock) -instance DecodeDisk TestBlock (LedgerState TestBlock) +instance EncodeDisk TestBlock (LedgerState TestBlock EmptyMK) +instance DecodeDisk TestBlock (LedgerState TestBlock EmptyMK) instance EncodeDisk TestBlock (AnnTip TestBlock) where encodeDisk _ = encodeAnnTipIsEBB encode @@ -856,7 +877,7 @@ instance ToExpr (Tip TestBlock) deriving instance ToExpr TestBlockError deriving instance ToExpr (TipInfoIsEBB TestBlock) -deriving instance ToExpr (LedgerState TestBlock) +deriving instance ToExpr (LedgerState TestBlock EmptyMK) deriving instance ToExpr (HeaderError TestBlock) deriving instance ToExpr TestBlockOtherHeaderEnvelopeError deriving instance ToExpr (HeaderEnvelopeError TestBlock) diff --git a/scripts/ci/run-stylish.sh b/scripts/ci/run-stylish.sh index 43190a0333..58e1fae420 100755 --- a/scripts/ci/run-stylish.sh +++ b/scripts/ci/run-stylish.sh @@ -27,13 +27,27 @@ esac $fdcmd --full-path "$path" \ --extension hs \ --exclude Setup.hs \ + --exclude ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs \ + --exclude ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs \ + --exclude 
ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs \
+  --exclude ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs \
+  --exclude ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs \
+  --exclude ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs \
   --exclude ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs \
   --exec-batch stylish-haskell -c .stylish-haskell.yaml -i

-# We don't want these deprecation warnings to be removed accidentally
-grep "#if __GLASGOW_HASKELL__ < 900
-import Data.Foldable (asum)
-#endif" ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs >/dev/null 2>&1
+# We don't want these pragmas to be removed accidentally
+f () {
+  grep "#if __GLASGOW_HASKELL__.*
+import" $1 >/dev/null 2>&1
+}
+f ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Ledger/SupportsMempool.hs
+f ouroboros-consensus-cardano/src/unstable-cardano-tools/Cardano/Tools/DBAnalyser/Analysis.hs
+f ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V2/Init.hs
+f ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Storage/LedgerDB/V1/Init.hs
+f ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Query.hs
+f ouroboros-consensus/src/ouroboros-consensus/Ouroboros/Consensus/Mempool/Impl/Common.hs
+f ouroboros-consensus-cardano/app/DBAnalyser/Parsers.hs

 case "$(uname -s)" in
     MINGW*) git ls-files --eol | grep "w/crlf" | awk '{print $4}' | xargs dos2unix;;
diff --git a/scripts/docs/modules-consensus.svg b/scripts/docs/modules-consensus.svg
index 4229dea4f7..ad0bf42af3 100644
--- a/scripts/docs/modules-consensus.svg
+++ b/scripts/docs/modules-consensus.svg
@@ -1,3268 +1,3775 @@
[Regenerated module-dependency graph (Graphviz SVG, ~3,800 lines): only the node and cluster labels survived extraction, so the markup is elided here. The labels show the updated graph adding clusters and modules for the UTXO-HD work, e.g. Ledger.Tables, Storage.LedgerDB.V1 (BackingStore, DbChangelog, LMDB), and Storage.LedgerDB.V2 (LedgerSeq, Forker, InMemory).]
diff --git a/sop-extras/src/Data/SOP/Functors.hs b/sop-extras/src/Data/SOP/Functors.hs
index 25bdf23f71..70957be43a 100644
--- a/sop-extras/src/Data/SOP/Functors.hs
+++ b/sop-extras/src/Data/SOP/Functors.hs
@@ -5,7 +5,6 @@
 module Data.SOP.Functors (
     Flip (..)
-  , K2 (..)
   , Product2 (..)
   , snd2
   ) where

@@ -26,6 +25,3 @@ snd2 (Pair2 _ g) = g
 type Flip :: (x -> y -> Type) -> y -> x -> Type
 newtype Flip f x y = Flip {unFlip :: f y x}
   deriving (Eq, Generic, NoThunks, Show)
-
-type K2 :: Type -> x -> y -> Type
-newtype K2 a b c = K2 a