From bd2912781ba4af729692ae0c13063634e177f31f Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Sat, 14 Sep 2024 14:51:25 +0200 Subject: [PATCH 01/14] Switch to .NET 9 --- global.json | 4 ++-- src/Nethermind/Directory.Build.props | 4 ++-- src/Nethermind/Directory.Packages.props | 12 ++++++------ .../Nethermind.ExternalSigner.Plugin.csproj | 1 - 4 files changed, 10 insertions(+), 11 deletions(-) diff --git a/global.json b/global.json index c33ebd20493..38ab05dd0a5 100644 --- a/global.json +++ b/global.json @@ -1,7 +1,7 @@ { "sdk": { - "version": "8.0.0", - "allowPrerelease": false, + "version": "9.0.0", + "allowPrerelease": true, "rollForward": "latestFeature" } } diff --git a/src/Nethermind/Directory.Build.props b/src/Nethermind/Directory.Build.props index 86fb341f764..4550f23652b 100644 --- a/src/Nethermind/Directory.Build.props +++ b/src/Nethermind/Directory.Build.props @@ -4,8 +4,8 @@ Debug true latest - net8.0 - true + net9.0 + true diff --git a/src/Nethermind/Directory.Packages.props b/src/Nethermind/Directory.Packages.props index 6b405f92b29..88a5c6af6d9 100644 --- a/src/Nethermind/Directory.Packages.props +++ b/src/Nethermind/Directory.Packages.props @@ -26,8 +26,8 @@ - - + + @@ -39,7 +39,7 @@ - + @@ -70,10 +70,10 @@ - - + + - + diff --git a/src/Nethermind/Nethermind.ExternalSigner.Plugin/Nethermind.ExternalSigner.Plugin.csproj b/src/Nethermind/Nethermind.ExternalSigner.Plugin/Nethermind.ExternalSigner.Plugin.csproj index 31ecca4eede..59e2b3e5938 100644 --- a/src/Nethermind/Nethermind.ExternalSigner.Plugin/Nethermind.ExternalSigner.Plugin.csproj +++ b/src/Nethermind/Nethermind.ExternalSigner.Plugin/Nethermind.ExternalSigner.Plugin.csproj @@ -1,7 +1,6 @@  - net8.0 enable enable From 35bcaac3b6d2c18a1308c66779058ad3fce58d8e Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Thu, 19 Sep 2024 22:50:55 +0200 Subject: [PATCH 02/14] Update packages --- src/Nethermind/Directory.Packages.props | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Nethermind/Directory.Packages.props b/src/Nethermind/Directory.Packages.props index 88a5c6af6d9..9aaf0c1d3c6 100644 --- a/src/Nethermind/Directory.Packages.props +++ b/src/Nethermind/Directory.Packages.props @@ -37,8 +37,8 @@ - - + + From f9455ea584c041e99dd443685f294995dd8c003a Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Thu, 26 Sep 2024 20:07:07 +0200 Subject: [PATCH 03/14] Update Docker files --- Dockerfile | 4 ++-- Dockerfile.chiseled | 4 ++-- Dockerfile.diag | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Dockerfile b/Dockerfile index ccdea679109..1b06cdb804a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited # SPDX-License-Identifier: LGPL-3.0-only -FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0-noble AS build +FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:9.0-noble AS build ARG BUILD_CONFIG=release ARG BUILD_TIMESTAMP @@ -18,7 +18,7 @@ RUN arch=$([ "$TARGETARCH" = "amd64" ] && echo "x64" || echo "$TARGETARCH") && \ # A temporary symlink to support the old executable name RUN ln -s -r /publish/nethermind /publish/Nethermind.Runner -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:8.0-noble +FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble WORKDIR /nethermind diff --git a/Dockerfile.chiseled b/Dockerfile.chiseled index cd327a31b8e..ae64366ae20 100644 --- a/Dockerfile.chiseled +++ b/Dockerfile.chiseled @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: 2024 Demerzel 
Solutions Limited # SPDX-License-Identifier: LGPL-3.0-only -FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0-noble AS build +FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:9.0-noble AS build ARG BUILD_CONFIG=release ARG BUILD_TIMESTAMP @@ -21,7 +21,7 @@ RUN cd /publish && \ mkdir logs && \ mkdir nethermind_db -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:8.0-noble-chiseled +FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble-chiseled WORKDIR /nethermind diff --git a/Dockerfile.diag b/Dockerfile.diag index 768e8fa9c2e..4d18729150c 100644 --- a/Dockerfile.diag +++ b/Dockerfile.diag @@ -1,7 +1,7 @@ # SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited # SPDX-License-Identifier: LGPL-3.0-only -FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:8.0-noble AS build +FROM --platform=$BUILDPLATFORM mcr.microsoft.com/dotnet/sdk:9.0-noble AS build ARG BUILD_CONFIG=release ARG BUILD_TIMESTAMP @@ -22,7 +22,7 @@ RUN dotnet tool install -g dotnet-dump && \ dotnet tool install -g dotnet-trace && \ dotnet tool install -g JetBrains.dotTrace.GlobalTools -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:8.0-noble +FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble WORKDIR /nethermind From bd43063d374afbf107d8813ad2946ec7e499e3f4 Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Mon, 7 Oct 2024 23:31:36 +0200 Subject: [PATCH 04/14] Update tools' target framework --- tools/HiveCompare/HiveCompare/HiveCompare.csproj | 2 +- .../HiveConsensusWorkflowGenerator.csproj | 2 +- tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj | 2 +- tools/SendBlobs/SendBlobs.csproj | 2 +- tools/TxParser/TxParser.csproj | 2 +- tools/docgen/DocGen.csproj | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/HiveCompare/HiveCompare/HiveCompare.csproj b/tools/HiveCompare/HiveCompare/HiveCompare.csproj index aa57c232794..26b1167908b 100644 --- a/tools/HiveCompare/HiveCompare/HiveCompare.csproj +++ b/tools/HiveCompare/HiveCompare/HiveCompare.csproj @@ -2,7 +2,7 @@ Exe - net8.0 + net9.0 enable enable diff --git a/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj b/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj index 217877d9884..c1883221982 100644 --- a/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj +++ b/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj @@ -2,7 +2,7 @@ Exe - net8.0 + net9.0 enable enable HiveConsensusWorkflowGenerator diff --git a/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj b/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj index 0e5870bdf32..9f7d0d09354 100644 --- a/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj +++ b/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj @@ -2,7 +2,7 @@ Exe - net8.0 + net9.0 enable enable diff --git a/tools/SendBlobs/SendBlobs.csproj b/tools/SendBlobs/SendBlobs.csproj index 93c44067294..eeee5117532 100644 --- a/tools/SendBlobs/SendBlobs.csproj +++ b/tools/SendBlobs/SendBlobs.csproj @@ -3,7 +3,7 @@ Exe - net8.0 + net9.0 enable enable true diff --git a/tools/TxParser/TxParser.csproj b/tools/TxParser/TxParser.csproj index 08e4356ef34..3c04fd90cd7 100644 --- a/tools/TxParser/TxParser.csproj +++ b/tools/TxParser/TxParser.csproj @@ -11,7 +11,7 @@ Exe - net8.0 + net9.0 enable enable diff --git a/tools/docgen/DocGen.csproj b/tools/docgen/DocGen.csproj index 1890c043d16..32a0267c61f 100644 --- 
a/tools/docgen/DocGen.csproj +++ b/tools/docgen/DocGen.csproj @@ -2,7 +2,7 @@ Exe - net8.0 + net9.0 enable enable From ea2f47c70751f0712309ea140bd25079eb3d3b7d Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Fri, 11 Oct 2024 00:05:28 +0200 Subject: [PATCH 05/14] Add `Directory.Build.props` to tools --- tools/Directory.Build.props | 13 +++++++++++++ .../{HiveCompare => }/HiveCompare.csproj | 3 --- tools/HiveCompare/HiveCompare.sln | 12 ++++++------ .../{HiveCompare => }/Models/HiveTestResult.cs | 0 tools/HiveCompare/{HiveCompare => }/Program.cs | 0 .../HiveConsensusWorkflowGenerator.csproj | 3 --- .../Nethermind.Tools.Kute.csproj | 3 --- tools/SendBlobs/SendBlobs.csproj | 4 ---- tools/TxParser/TxParser.csproj | 11 ++++------- tools/docgen/DocGen.csproj | 3 --- 10 files changed, 23 insertions(+), 29 deletions(-) create mode 100644 tools/Directory.Build.props rename tools/HiveCompare/{HiveCompare => }/HiveCompare.csproj (65%) rename tools/HiveCompare/{HiveCompare => }/Models/HiveTestResult.cs (100%) rename tools/HiveCompare/{HiveCompare => }/Program.cs (100%) diff --git a/tools/Directory.Build.props b/tools/Directory.Build.props new file mode 100644 index 00000000000..1900f35c2c2 --- /dev/null +++ b/tools/Directory.Build.props @@ -0,0 +1,13 @@ + + + + Debug + latest + net9.0 + enable + enable + + true + + + diff --git a/tools/HiveCompare/HiveCompare/HiveCompare.csproj b/tools/HiveCompare/HiveCompare.csproj similarity index 65% rename from tools/HiveCompare/HiveCompare/HiveCompare.csproj rename to tools/HiveCompare/HiveCompare.csproj index 26b1167908b..7df00c4ea17 100644 --- a/tools/HiveCompare/HiveCompare/HiveCompare.csproj +++ b/tools/HiveCompare/HiveCompare.csproj @@ -2,9 +2,6 @@ Exe - net9.0 - enable - enable diff --git a/tools/HiveCompare/HiveCompare.sln b/tools/HiveCompare/HiveCompare.sln index 185a56d11b6..6cbb5651689 100644 --- a/tools/HiveCompare/HiveCompare.sln +++ b/tools/HiveCompare/HiveCompare.sln @@ -1,9 +1,9 @@  Microsoft Visual Studio Solution File, Format Version 12.00 -# Visual Studio Version 17 +# 17 VisualStudioVersion = 17.4.33122.133 MinimumVisualStudioVersion = 10.0.40219.1 -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "HiveCompare", "HiveCompare\HiveCompare.csproj", "{D4F2D91C-F015-4070-BE34-C98C0E930279}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "HiveCompare", "HiveCompare.csproj", "{FCFD5C39-B554-4AA5-98F7-6539A950F8EE}" EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution @@ -11,10 +11,10 @@ Global Release|Any CPU = Release|Any CPU EndGlobalSection GlobalSection(ProjectConfigurationPlatforms) = postSolution - {D4F2D91C-F015-4070-BE34-C98C0E930279}.Debug|Any CPU.ActiveCfg = Debug|Any CPU - {D4F2D91C-F015-4070-BE34-C98C0E930279}.Debug|Any CPU.Build.0 = Debug|Any CPU - {D4F2D91C-F015-4070-BE34-C98C0E930279}.Release|Any CPU.ActiveCfg = Release|Any CPU - {D4F2D91C-F015-4070-BE34-C98C0E930279}.Release|Any CPU.Build.0 = Release|Any CPU + {FCFD5C39-B554-4AA5-98F7-6539A950F8EE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {FCFD5C39-B554-4AA5-98F7-6539A950F8EE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {FCFD5C39-B554-4AA5-98F7-6539A950F8EE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {FCFD5C39-B554-4AA5-98F7-6539A950F8EE}.Release|Any CPU.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE diff --git a/tools/HiveCompare/HiveCompare/Models/HiveTestResult.cs b/tools/HiveCompare/Models/HiveTestResult.cs similarity index 100% rename from 
tools/HiveCompare/HiveCompare/Models/HiveTestResult.cs rename to tools/HiveCompare/Models/HiveTestResult.cs diff --git a/tools/HiveCompare/HiveCompare/Program.cs b/tools/HiveCompare/Program.cs similarity index 100% rename from tools/HiveCompare/HiveCompare/Program.cs rename to tools/HiveCompare/Program.cs diff --git a/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj b/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj index c1883221982..03a20e3003b 100644 --- a/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj +++ b/tools/HiveConsensusWorkflowGenerator/HiveConsensusWorkflowGenerator.csproj @@ -2,9 +2,6 @@ Exe - net9.0 - enable - enable HiveConsensusWorkflowGenerator diff --git a/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj b/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj index 9f7d0d09354..01a6095ad29 100644 --- a/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj +++ b/tools/Nethermind.Tools.Kute/Nethermind.Tools.Kute.csproj @@ -2,9 +2,6 @@ Exe - net9.0 - enable - enable diff --git a/tools/SendBlobs/SendBlobs.csproj b/tools/SendBlobs/SendBlobs.csproj index eeee5117532..dc4e3b76ab0 100644 --- a/tools/SendBlobs/SendBlobs.csproj +++ b/tools/SendBlobs/SendBlobs.csproj @@ -1,11 +1,7 @@ - Exe - net9.0 - enable - enable true true *.pdb diff --git a/tools/TxParser/TxParser.csproj b/tools/TxParser/TxParser.csproj index 3c04fd90cd7..828a5ae38f1 100644 --- a/tools/TxParser/TxParser.csproj +++ b/tools/TxParser/TxParser.csproj @@ -1,5 +1,9 @@ + + Exe + + @@ -9,11 +13,4 @@ - - Exe - net9.0 - enable - enable - - diff --git a/tools/docgen/DocGen.csproj b/tools/docgen/DocGen.csproj index 32a0267c61f..c2067882fda 100644 --- a/tools/docgen/DocGen.csproj +++ b/tools/docgen/DocGen.csproj @@ -2,9 +2,6 @@ Exe - net9.0 - enable - enable From 2e6d4c5520da6bf66b8a1ce0658f870b65879ee7 Mon Sep 17 00:00:00 2001 From: "lukasz.rozmej" Date: Fri, 11 Oct 2024 00:43:42 +0200 Subject: [PATCH 06/14] Optimize Tasks collections in .Net 9 --- .../Processing/BlockchainProcessor.cs | 2 +- .../Producers/MultipleBlockProducer.cs | 14 +++++------- .../Encoding/TxDecoderTests.cs | 8 +++---- .../PubSub/CompositePublisher.cs | 22 +++++++------------ .../Nethermind.Db/RocksDbInitializer.cs | 7 +++--- .../Tracing/GethLikeCallTracerTests.cs | 12 +++------- .../NodesLocator.cs | 6 +++-- .../Nethermind.Network/CompositeNodeSource.cs | 9 ++++---- .../Nethermind.Network/PeerManager.cs | 8 ++++--- .../Nethermind.Network/SessionMonitor.cs | 3 ++- .../Trie/TrieNodeRecovery.cs | 6 +++-- .../Nethermind.Trie/BatchedTrieVisitor.cs | 7 +++--- .../Nethermind.Trie/PatriciaTree.cs | 3 +-- .../Nethermind.Trie/Pruning/TrieStore.cs | 6 +++-- 14 files changed, 53 insertions(+), 60 deletions(-) diff --git a/src/Nethermind/Nethermind.Consensus/Processing/BlockchainProcessor.cs b/src/Nethermind/Nethermind.Consensus/Processing/BlockchainProcessor.cs index 6598b9798a8..3ecd07927d0 100644 --- a/src/Nethermind/Nethermind.Consensus/Processing/BlockchainProcessor.cs +++ b/src/Nethermind/Nethermind.Consensus/Processing/BlockchainProcessor.cs @@ -167,7 +167,7 @@ public async Task StopAsync(bool processRemainingBlocks = false) _blockQueue.CompleteAdding(); } - await Task.WhenAll((_recoveryTask ?? Task.CompletedTask), (_processorTask ?? Task.CompletedTask)); + await Task.WhenAll(_recoveryTask ?? Task.CompletedTask, _processorTask ?? Task.CompletedTask); if (_logger.IsInfo) _logger.Info("Blockchain Processor shutdown complete.. 
please wait for all components to close"); } diff --git a/src/Nethermind/Nethermind.Consensus/Producers/MultipleBlockProducer.cs b/src/Nethermind/Nethermind.Consensus/Producers/MultipleBlockProducer.cs index 57836b2e474..13f9f26e28a 100644 --- a/src/Nethermind/Nethermind.Consensus/Producers/MultipleBlockProducer.cs +++ b/src/Nethermind/Nethermind.Consensus/Producers/MultipleBlockProducer.cs @@ -7,6 +7,7 @@ using System.Threading; using System.Threading.Tasks; using Nethermind.Core; +using Nethermind.Core.Collections; using Nethermind.Evm.Tracing; using Nethermind.Logging; @@ -32,23 +33,20 @@ protected MultipleBlockProducer( public async Task BuildBlock(BlockHeader? parentHeader, IBlockTracer? blockTracer = null, PayloadAttributes? payloadAttributes = null, CancellationToken? token = null) { - Task[] produceTasks = new Task[_blockProducers.Length]; + using ArrayPoolList> produceTasks = new(_blockProducers.Length); for (int i = 0; i < _blockProducers.Length; i++) { T blockProducerInfo = _blockProducers[i]; - if (!blockProducerInfo.Condition.CanProduce(parentHeader)) - { - produceTasks[i] = Task.FromResult(null); - continue; - } - produceTasks[i] = blockProducerInfo.BlockProducer.BuildBlock(parentHeader, blockProducerInfo.BlockTracer, cancellationToken: token); + produceTasks.Add(!blockProducerInfo.Condition.CanProduce(parentHeader!) + ? Task.FromResult(null) + : blockProducerInfo.BlockProducer.BuildBlock(parentHeader, blockProducerInfo.BlockTracer, cancellationToken: token)); } IEnumerable<(Block? Block, T BlockProducer)> blocksWithProducers; try { - Block?[] blocks = await Task.WhenAll(produceTasks); + Block?[] blocks = await Task.WhenAll(produceTasks.AsSpan()); blocksWithProducers = blocks.Zip(_blockProducers); } catch (OperationCanceledException) diff --git a/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs b/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs index 2bf4116bab7..00ed095d83a 100644 --- a/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs +++ b/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs @@ -82,12 +82,10 @@ public void CanCorrectlyCalculateTxHash_when_called_concurrently((Transaction Tx IEnumerable>>> tasks = Enumerable .Range(0, 32) - .Select((_) => - Task.Factory - .StartNew(() => decodedTx.Hash.Should().Be(expectedHash), - TaskCreationOptions.RunContinuationsAsynchronously)); + .Select(_ => Task.Factory.StartNew(() => decodedTx.Hash.Should().Be(expectedHash), + TaskCreationOptions.RunContinuationsAsynchronously)); - Task.WaitAll(tasks.ToArray()); + Task.WaitAll(tasks); } [TestCaseSource(nameof(TestCaseSource))] diff --git a/src/Nethermind/Nethermind.Core/PubSub/CompositePublisher.cs b/src/Nethermind/Nethermind.Core/PubSub/CompositePublisher.cs index cfb3541fa4b..02e1a8addc5 100644 --- a/src/Nethermind/Nethermind.Core/PubSub/CompositePublisher.cs +++ b/src/Nethermind/Nethermind.Core/PubSub/CompositePublisher.cs @@ -1,34 +1,28 @@ // SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited // SPDX-License-Identifier: LGPL-3.0-only +using System; using System.Threading.Tasks; +using Nethermind.Core.Collections; namespace Nethermind.Core.PubSub { - public class CompositePublisher : IPublisher + public class CompositePublisher(params IPublisher[] publishers) : IPublisher { - private readonly IPublisher[] _publishers; - - public CompositePublisher(params IPublisher[] publishers) - { - _publishers = publishers; - } - public async Task PublishAsync(T data) where T : class { - // TODO: .Net 9 stackalloc - Task[] tasks = new 
Task[_publishers.Length]; - for (int i = 0; i < _publishers.Length; i++) + using ArrayPoolList tasks = new(publishers.Length); + for (int i = 0; i < publishers.Length; i++) { - tasks[i] = _publishers[i].PublishAsync(data); + tasks.Add(publishers[i].PublishAsync(data)); } - await Task.WhenAll(tasks); + await Task.WhenAll(tasks.AsSpan()); } public void Dispose() { - foreach (IPublisher publisher in _publishers) + foreach (IPublisher publisher in publishers) { publisher.Dispose(); } diff --git a/src/Nethermind/Nethermind.Db/RocksDbInitializer.cs b/src/Nethermind/Nethermind.Db/RocksDbInitializer.cs index 9e0e61cf742..26be4d0ff27 100644 --- a/src/Nethermind/Nethermind.Db/RocksDbInitializer.cs +++ b/src/Nethermind/Nethermind.Db/RocksDbInitializer.cs @@ -4,6 +4,7 @@ using System; using System.Collections.Generic; using System.Threading.Tasks; +using Nethermind.Core.Collections; namespace Nethermind.Db { @@ -68,13 +69,13 @@ protected void InitAll() protected async Task InitAllAsync() { - HashSet allInitializers = new(); - foreach (var registration in _registrations) + using ArrayPoolList allInitializers = new(_registrations.Count); + foreach (Action registration in _registrations) { allInitializers.Add(Task.Run(() => registration.Invoke())); } - await Task.WhenAll(allInitializers); + await Task.WhenAll(allInitializers.AsSpan()); } protected static string GetTitleDbName(string dbName) => char.ToUpper(dbName[0]) + dbName[1..]; diff --git a/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs b/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs index af49a60c100..c356ce7adc9 100644 --- a/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs +++ b/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs @@ -17,7 +17,7 @@ namespace Nethermind.Evm.Test.Tracing; [TestFixture] public class GethLikeCallTracerTests : VirtualMachineTestsBase { - private static readonly JsonSerializerOptions SerializerOptions = EthereumJsonSerializer.JsonOptionsIndented; + private static readonly JsonSerializerOptions SerializerOptions = new(EthereumJsonSerializer.JsonOptionsIndented) { NewLine = "\n"}; private const string? WithLog = """{"withLog":true}"""; private const string? OnlyTopCall = """{"onlyTopCall":true}"""; private const string? WithLogAndOnlyTopCall = """{"withLog":true,"onlyTopCall":true}"""; @@ -27,14 +27,8 @@ private string ExecuteCallTrace(byte[] code, string? tracerConfig = null) (_, Transaction tx) = PrepareTx(MainnetSpecProvider.CancunActivation, 100000, code); NativeCallTracer tracer = new(tx, GetGethTraceOptions(tracerConfig)); - GethLikeTxTrace callTrace = Execute( - tracer, - code, - MainnetSpecProvider.CancunActivation) - .BuildResult(); - return JsonSerializer.Serialize(callTrace.CustomTracerResult?.Value, SerializerOptions) - // fix for windows, can be done better in .NET 9: https://github.com/dotnet/runtime/issues/84117 - .ReplaceLineEndings("\n"); + GethLikeTxTrace callTrace = Execute(tracer, code, MainnetSpecProvider.CancunActivation).BuildResult(); + return JsonSerializer.Serialize(callTrace.CustomTracerResult?.Value, SerializerOptions); } private static GethTraceOptions GetGethTraceOptions(string? 
config) => GethTraceOptions.Default with diff --git a/src/Nethermind/Nethermind.Network.Discovery/NodesLocator.cs b/src/Nethermind/Nethermind.Network.Discovery/NodesLocator.cs index ef0a4b3d7f2..254bedef24b 100644 --- a/src/Nethermind/Nethermind.Network.Discovery/NodesLocator.cs +++ b/src/Nethermind/Nethermind.Network.Discovery/NodesLocator.cs @@ -3,7 +3,9 @@ using System.Text; using Nethermind.Core; +using Nethermind.Core.Collections; using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; using Nethermind.Logging; using Nethermind.Network.Discovery.Lifecycle; using Nethermind.Network.Discovery.Messages; @@ -128,8 +130,8 @@ public async Task LocateNodesAsync(byte[]? searchedNodeId, CancellationToken can int count = failRequestCount > 0 ? failRequestCount : _discoveryConfig.Concurrency; IEnumerable nodesToSend = tryCandidates.Skip(nodesTriedCount).Take(count); - IEnumerable> sendFindNodeTasks = SendFindNodes(searchedNodeId, nodesToSend, alreadyTriedNodes); - Result?[] results = await Task.WhenAll(sendFindNodeTasks); + using ArrayPoolList> sendFindNodeTasks = SendFindNodes(searchedNodeId, nodesToSend, alreadyTriedNodes).ToPooledList(count); + Result[] results = await Task.WhenAll(sendFindNodeTasks.AsSpan()); if (results.Length == 0) { diff --git a/src/Nethermind/Nethermind.Network/CompositeNodeSource.cs b/src/Nethermind/Nethermind.Network/CompositeNodeSource.cs index 1402ebcf725..811eb1c3a81 100644 --- a/src/Nethermind/Nethermind.Network/CompositeNodeSource.cs +++ b/src/Nethermind/Nethermind.Network/CompositeNodeSource.cs @@ -8,6 +8,8 @@ using System.Threading; using System.Threading.Channels; using System.Threading.Tasks; +using Nethermind.Core.Collections; +using Nethermind.Core.Extensions; using Nethermind.Stats.Model; namespace Nethermind.Network; @@ -20,14 +22,13 @@ public async IAsyncEnumerable DiscoverNodes([EnumeratorCancellation] Cance { Channel ch = Channel.CreateBounded(1); - // TODO: .Net 9 stackalloc - Task[] feedTasks = _nodeSources.Select(async innerSource => + using ArrayPoolList feedTasks = _nodeSources.Select(async innerSource => { await foreach (Node node in innerSource.DiscoverNodes(cancellationToken)) { await ch.Writer.WriteAsync(node, cancellationToken); } - }).ToArray(); + }).ToPooledList(_nodeSources.Length * 16); try { @@ -38,7 +39,7 @@ public async IAsyncEnumerable DiscoverNodes([EnumeratorCancellation] Cance } finally { - await Task.WhenAll(feedTasks); + await Task.WhenAll(feedTasks.AsSpan()); } } diff --git a/src/Nethermind/Nethermind.Network/PeerManager.cs b/src/Nethermind/Nethermind.Network/PeerManager.cs index 360c2101c50..8a31475d283 100644 --- a/src/Nethermind/Nethermind.Network/PeerManager.cs +++ b/src/Nethermind/Nethermind.Network/PeerManager.cs @@ -12,7 +12,9 @@ using FastEnumUtility; using Nethermind.Core; using Nethermind.Core.Attributes; +using Nethermind.Core.Collections; using Nethermind.Core.Crypto; +using Nethermind.Core.Extensions; using Nethermind.Logging; using Nethermind.Network.Config; using Nethermind.Network.P2P; @@ -206,7 +208,7 @@ private class CandidateSelection private async Task RunPeerUpdateLoop() { Channel taskChannel = Channel.CreateBounded(1); - List? 
tasks = Enumerable.Range(0, _outgoingConnectParallelism).Select(async (idx) => + using ArrayPoolList tasks = Enumerable.Range(0, _outgoingConnectParallelism).Select(async idx => { await foreach (Peer peer in taskChannel.Reader.ReadAllAsync(_cancellationTokenSource.Token)) { @@ -226,7 +228,7 @@ private async Task RunPeerUpdateLoop() } } if (_logger.IsDebug) _logger.Debug($"Connect worker {idx} completed"); - }).ToList(); + }).ToPooledList(_outgoingConnectParallelism); int loopCount = 0; long previousActivePeersCount = 0; @@ -359,7 +361,7 @@ private async Task RunPeerUpdateLoop() } taskChannel.Writer.Complete(); - await Task.WhenAll(tasks); + await Task.WhenAll(tasks.AsSpan()); } private bool EnsureAvailableActivePeerSlot() diff --git a/src/Nethermind/Nethermind.Network/SessionMonitor.cs b/src/Nethermind/Nethermind.Network/SessionMonitor.cs index 0c2f73f2fbc..11ae1942a5e 100644 --- a/src/Nethermind/Nethermind.Network/SessionMonitor.cs +++ b/src/Nethermind/Nethermind.Network/SessionMonitor.cs @@ -5,6 +5,7 @@ using System.Collections.Concurrent; using System.Collections.Generic; using System.Linq; +using System.Runtime.InteropServices; using System.Threading; using System.Threading.Tasks; @@ -84,7 +85,7 @@ private async Task SendPingMessagesAsync() if (_pingTasks.Count > 0) { - bool[] tasks = await Task.WhenAll(_pingTasks); + bool[] tasks = await Task.WhenAll(CollectionsMarshal.AsSpan(_pingTasks)); int tasksLength = tasks.Length; if (tasksLength != 0) { diff --git a/src/Nethermind/Nethermind.Synchronization/Trie/TrieNodeRecovery.cs b/src/Nethermind/Nethermind.Synchronization/Trie/TrieNodeRecovery.cs index fa1c10d99ec..85c20a2da21 100644 --- a/src/Nethermind/Nethermind.Synchronization/Trie/TrieNodeRecovery.cs +++ b/src/Nethermind/Nethermind.Synchronization/Trie/TrieNodeRecovery.cs @@ -2,6 +2,7 @@ // SPDX-License-Identifier: LGPL-3.0-only using System; +using System.Collections.Generic; using System.Linq; using System.Threading; using System.Threading.Tasks; @@ -47,7 +48,8 @@ protected TrieNodeRecovery(ISyncPeerPool syncPeerPool, ILogManager? logManager) { while (keyRecoveries.Count > 0) { - Task<(Recovery, byte[]?)> task = await Task.WhenAny(keyRecoveries.Select(kr => kr.Task!)); + using ArrayPoolList>? tasks = keyRecoveries.Select(kr => kr.Task!).ToPooledList(keyRecoveries.Count); + Task<(Recovery, byte[]?)> task = await Task.WhenAny<(Recovery, byte[]?)>(tasks.AsSpan()); (Recovery Recovery, byte[]? Data) result = await task; if (result.Data is null) { @@ -57,7 +59,7 @@ protected TrieNodeRecovery(ISyncPeerPool syncPeerPool, ILogManager? logManager) else { if (_logger.IsWarn) _logger.Warn($"Successfully recovered from peer {result.Recovery.Peer} with {result.Data.Length} bytes!"); - cts.Cancel(); + await cts.CancelAsync(); return result.Data; } } diff --git a/src/Nethermind/Nethermind.Trie/BatchedTrieVisitor.cs b/src/Nethermind/Nethermind.Trie/BatchedTrieVisitor.cs index 99bff648354..2cfc57b4556 100644 --- a/src/Nethermind/Nethermind.Trie/BatchedTrieVisitor.cs +++ b/src/Nethermind/Nethermind.Trie/BatchedTrieVisitor.cs @@ -142,12 +142,11 @@ public void Start( try { - // TODO: .Net 9 stackalloc - Task[]? 
tasks = Enumerable.Range(0, trieVisitContext.MaxDegreeOfParallelism) + using ArrayPoolList tasks = Enumerable.Range(0, trieVisitContext.MaxDegreeOfParallelism) .Select(_ => Task.Run(BatchedThread)) - .ToArray(); + .ToPooledList(trieVisitContext.MaxDegreeOfParallelism); - Task.WaitAll(tasks); + Task.WaitAll(tasks.AsSpan()); } catch (Exception) { diff --git a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs index 88b443a3983..6380da3aa2a 100644 --- a/src/Nethermind/Nethermind.Trie/PatriciaTree.cs +++ b/src/Nethermind/Nethermind.Trie/PatriciaTree.cs @@ -207,7 +207,6 @@ Task CreateTaskForPath(TreePath childPath, TrieNode childNode, int idx) => Task. committer.ReturnConcurrencyQuota(); }); - // TODO: .Net 9 stackalloc ArrayPoolList? childTasks = null; for (int i = 0; i < 16; i++) @@ -240,7 +239,7 @@ Task CreateTaskForPath(TreePath childPath, TrieNode childNode, int idx) => Task. if (childTasks is not null) { - Task.WaitAll(childTasks.ToArray()); + Task.WaitAll(childTasks.AsSpan()); childTasks.Dispose(); } } diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs index 6ae6ca164fc..f5e3caa9c6a 100644 --- a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs +++ b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs @@ -797,11 +797,13 @@ void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path) }); } - Task.WaitAll(parallelStartNodes.Select(entry => Task.Run(() => + using ArrayPoolList tasks = parallelStartNodes.Select(entry => Task.Run(() => { (TrieNode trieNode, Hash256? address2, TreePath path2) = entry; PersistNodeStartingFrom(trieNode, address2, path2, commitSet, persistedNodeRecorder, writeFlags, disposeQueue); - })).ToArray()); + })).ToPooledList(parallelStartNodes.Count); + + Task.WaitAll(tasks.AsSpan()); disposeQueue.CompleteAdding(); Task.WaitAll(_disposeTasks); From 351b80ad8bdbb1dbb07754f4ae9263a1e85832f7 Mon Sep 17 00:00:00 2001 From: "lukasz.rozmej" Date: Fri, 11 Oct 2024 00:51:57 +0200 Subject: [PATCH 07/14] use in test --- .../Nethermind.Core.Test/Encoding/TxDecoderTests.cs | 11 ++++++----- .../Tracing/GethLikeCallTracerTests.cs | 2 +- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs b/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs index 00ed095d83a..6d441e08f17 100644 --- a/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs +++ b/src/Nethermind/Nethermind.Core.Test/Encoding/TxDecoderTests.cs @@ -7,6 +7,7 @@ using System.Threading.Tasks; using FluentAssertions; using FluentAssertions.Numeric; +using Nethermind.Core.Collections; using Nethermind.Core.Crypto; using Nethermind.Core.Eip2930; using Nethermind.Core.Extensions; @@ -68,7 +69,7 @@ public class TxDecoderTests [TestCaseSource(nameof(TestCaseSource))] [Repeat(10)] // Might wanna increase this to double check when changing logic as on lower value, it does not reproduce. 
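    // The rewrite below makes this test async: the 32 hash-check tasks are gathered into a pooled
    // ArrayPoolList and awaited through the span-based Task.WhenAll overload added in .NET 9,
    // instead of blocking the calling thread with Task.WaitAll.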
- public void CanCorrectlyCalculateTxHash_when_called_concurrently((Transaction Tx, string Description) testCase) + public async Task CanCorrectlyCalculateTxHash_when_called_concurrently((Transaction Tx, string Description) testCase) { Transaction tx = testCase.Tx; @@ -80,12 +81,12 @@ public void CanCorrectlyCalculateTxHash_when_called_concurrently((Transaction Tx decodedTx.SetPreHash(rlp.Bytes); - IEnumerable>>> tasks = Enumerable + using ArrayPoolList>>> tasks = Enumerable .Range(0, 32) - .Select(_ => Task.Factory.StartNew(() => decodedTx.Hash.Should().Be(expectedHash), - TaskCreationOptions.RunContinuationsAsynchronously)); + .Select(_ => Task.Factory.StartNew(() => decodedTx.Hash.Should().Be(expectedHash), TaskCreationOptions.RunContinuationsAsynchronously)) + .ToPooledList(32); - Task.WaitAll(tasks); + await Task.WhenAll>>(tasks.AsSpan()); } [TestCaseSource(nameof(TestCaseSource))] diff --git a/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs b/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs index c356ce7adc9..10eea16fa7a 100644 --- a/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs +++ b/src/Nethermind/Nethermind.Evm.Test/Tracing/GethLikeCallTracerTests.cs @@ -17,7 +17,7 @@ namespace Nethermind.Evm.Test.Tracing; [TestFixture] public class GethLikeCallTracerTests : VirtualMachineTestsBase { - private static readonly JsonSerializerOptions SerializerOptions = new(EthereumJsonSerializer.JsonOptionsIndented) { NewLine = "\n"}; + private static readonly JsonSerializerOptions SerializerOptions = new(EthereumJsonSerializer.JsonOptionsIndented) { NewLine = "\n" }; private const string? WithLog = """{"withLog":true}"""; private const string? OnlyTopCall = """{"onlyTopCall":true}"""; private const string? 
WithLogAndOnlyTopCall = """{"withLog":true,"onlyTopCall":true}"""; From e000fe06cb60c4f739bce9584032af53f38f1572 Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Fri, 11 Oct 2024 14:11:46 +0200 Subject: [PATCH 08/14] Remove redundant `$TARGETPLATFORM` --- Dockerfile | 2 +- Dockerfile.chiseled | 2 +- Dockerfile.diag | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1b06cdb804a..38e071e14b1 100644 --- a/Dockerfile +++ b/Dockerfile @@ -18,7 +18,7 @@ RUN arch=$([ "$TARGETARCH" = "amd64" ] && echo "x64" || echo "$TARGETARCH") && \ # A temporary symlink to support the old executable name RUN ln -s -r /publish/nethermind /publish/Nethermind.Runner -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble +FROM mcr.microsoft.com/dotnet/aspnet:9.0-noble WORKDIR /nethermind diff --git a/Dockerfile.chiseled b/Dockerfile.chiseled index ae64366ae20..a646cfb37d3 100644 --- a/Dockerfile.chiseled +++ b/Dockerfile.chiseled @@ -21,7 +21,7 @@ RUN cd /publish && \ mkdir logs && \ mkdir nethermind_db -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble-chiseled +FROM mcr.microsoft.com/dotnet/aspnet:9.0-noble-chiseled WORKDIR /nethermind diff --git a/Dockerfile.diag b/Dockerfile.diag index 4d18729150c..65013a4286c 100644 --- a/Dockerfile.diag +++ b/Dockerfile.diag @@ -22,7 +22,7 @@ RUN dotnet tool install -g dotnet-dump && \ dotnet tool install -g dotnet-trace && \ dotnet tool install -g JetBrains.dotTrace.GlobalTools -FROM --platform=$TARGETPLATFORM mcr.microsoft.com/dotnet/aspnet:9.0-noble +FROM mcr.microsoft.com/dotnet/aspnet:9.0-noble WORKDIR /nethermind From 082149f300381579015e121421ac73c9a68d9c0e Mon Sep 17 00:00:00 2001 From: Ruben Buniatyan Date: Mon, 14 Oct 2024 23:00:18 +0200 Subject: [PATCH 09/14] Update packages --- src/Nethermind/Directory.Packages.props | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/Nethermind/Directory.Packages.props b/src/Nethermind/Directory.Packages.props index e927b7c7605..d72b238e8d8 100644 --- a/src/Nethermind/Directory.Packages.props +++ b/src/Nethermind/Directory.Packages.props @@ -26,8 +26,8 @@ - - + + @@ -37,9 +37,9 @@ - - - + + + @@ -72,10 +72,10 @@ - - + + - + From cbf62048345291e37d2d9425230c41e53bc28ef8 Mon Sep 17 00:00:00 2001 From: Amirul Ashraf Date: Fri, 18 Oct 2024 08:05:06 +0800 Subject: [PATCH 10/14] Remove to array --- src/Nethermind/Nethermind.State/PersistentStorageProvider.cs | 2 +- src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs index 27f814222f8..5af8d6aa374 100644 --- a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs +++ b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs @@ -347,7 +347,7 @@ public void CommitTrees(IBlockCommitter blockCommitter) } } - Task.WaitAll(commitTask.ToArray()); + Task.WaitAll(commitTask); _toUpdateRoots.Clear(); // only needed here as there is no control over cached storage size otherwise diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs index b0cacf09bf0..1c603ccae5d 100644 --- a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs +++ b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs @@ -777,7 +777,7 @@ void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path) { (TrieNode trieNode, Hash256? 
address2, TreePath path2) = entry; PersistNodeStartingFrom(trieNode, address2, path2, persistedNodeRecorder, writeFlags, disposeQueue); - })).ToArray()); + }))); disposeQueue.CompleteAdding(); Task.WaitAll(_disposeTasks); From da5da85575b0c84f12bf8791ca367ff86cae4d5f Mon Sep 17 00:00:00 2001 From: Amirul Ashraf Date: Fri, 18 Oct 2024 08:17:45 +0800 Subject: [PATCH 11/14] Use span --- src/Nethermind/Nethermind.State/PersistentStorageProvider.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs index 5af8d6aa374..b1b418037b2 100644 --- a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs +++ b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs @@ -347,7 +347,7 @@ public void CommitTrees(IBlockCommitter blockCommitter) } } - Task.WaitAll(commitTask); + Task.WaitAll(commitTask.AsSpan()); _toUpdateRoots.Clear(); // only needed here as there is no control over cached storage size otherwise From 3eb0e3b913e17f20c32b23f8e4088a102e4aaaa0 Mon Sep 17 00:00:00 2001 From: Amirul Ashraf Date: Fri, 18 Oct 2024 08:22:42 +0800 Subject: [PATCH 12/14] Fix build --- src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs index 43bb8c2001d..d2c19e70b56 100644 --- a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs +++ b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs @@ -776,7 +776,7 @@ void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path) using ArrayPoolList tasks = parallelStartNodes.Select(entry => Task.Run(() => { (TrieNode trieNode, Hash256? address2, TreePath path2) = entry; - PersistNodeStartingFrom(trieNode, address2, path2, commitSet, persistedNodeRecorder, writeFlags, disposeQueue); + PersistNodeStartingFrom(trieNode, address2, path2, persistedNodeRecorder, writeFlags, disposeQueue); })).ToPooledList(parallelStartNodes.Count); Task.WaitAll(tasks.AsSpan()); From 526371c7ff4b9a9268657849b4d728fe21e10b03 Mon Sep 17 00:00:00 2001 From: "lukasz.rozmej" Date: Wed, 20 Nov 2024 11:35:52 +0100 Subject: [PATCH 13/14] fix merge --- .../PersistentStorageProvider.cs | 771 +++++---- .../PersistentStorageProvider.cs.orig | 595 ------- .../Nethermind.Trie/Pruning/TrieStore.cs.orig | 1385 ----------------- 3 files changed, 385 insertions(+), 2366 deletions(-) delete mode 100644 src/Nethermind/Nethermind.State/PersistentStorageProvider.cs.orig delete mode 100644 src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs.orig diff --git a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs index b1b418037b2..9bbbf99282d 100644 --- a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs +++ b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs @@ -19,525 +19,524 @@ using Nethermind.State.Tracing; using Nethermind.Trie.Pruning; -namespace Nethermind.State +namespace Nethermind.State; + +using Nethermind.Core.Cpu; +/// +/// Manages persistent storage allowing for snapshotting and restoring +/// Persists data to ITrieStore +/// +internal sealed class PersistentStorageProvider : PartialStorageProviderBase { - using Nethermind.Core.Cpu; + private readonly ITrieStore _trieStore; + private readonly StateProvider _stateProvider; + private readonly ILogManager? 
_logManager; + internal readonly IStorageTreeFactory _storageTreeFactory; + private readonly Dictionary _storages = new(); + private readonly HashSet _toUpdateRoots = new(); + + /// + /// EIP-1283 + /// + private readonly Dictionary _originalValues = new(); + + private readonly HashSet _committedThisRound = new(); + private readonly Dictionary> _blockCache = new(4_096); + private readonly ConcurrentDictionary? _preBlockCache; + private readonly Func _loadFromTree; + /// /// Manages persistent storage allowing for snapshotting and restoring /// Persists data to ITrieStore /// - internal sealed class PersistentStorageProvider : PartialStorageProviderBase + public PersistentStorageProvider(ITrieStore trieStore, + StateProvider stateProvider, + ILogManager logManager, + IStorageTreeFactory? storageTreeFactory, + ConcurrentDictionary? preBlockCache, + bool populatePreBlockCache) : base(logManager) { - private readonly ITrieStore _trieStore; - private readonly StateProvider _stateProvider; - private readonly ILogManager? _logManager; - internal readonly IStorageTreeFactory _storageTreeFactory; - private readonly Dictionary _storages = new(); - private readonly HashSet _toUpdateRoots = new(); - - /// - /// EIP-1283 - /// - private readonly Dictionary _originalValues = new(); - - private readonly HashSet _committedThisRound = new(); - private readonly Dictionary> _blockCache = new(4_096); - private readonly ConcurrentDictionary? _preBlockCache; - private readonly Func _loadFromTree; - - /// - /// Manages persistent storage allowing for snapshotting and restoring - /// Persists data to ITrieStore - /// - public PersistentStorageProvider(ITrieStore trieStore, - StateProvider stateProvider, - ILogManager logManager, - IStorageTreeFactory? storageTreeFactory, - ConcurrentDictionary? preBlockCache, - bool populatePreBlockCache) : base(logManager) - { - _trieStore = trieStore ?? throw new ArgumentNullException(nameof(trieStore)); - _stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider)); - _logManager = logManager ?? throw new ArgumentNullException(nameof(logManager)); - _storageTreeFactory = storageTreeFactory ?? new StorageTreeFactory(); - _preBlockCache = preBlockCache; - _populatePreBlockCache = populatePreBlockCache; - _loadFromTree = LoadFromTreeStorage; - } + _trieStore = trieStore ?? throw new ArgumentNullException(nameof(trieStore)); + _stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider)); + _logManager = logManager ?? throw new ArgumentNullException(nameof(logManager)); + _storageTreeFactory = storageTreeFactory ?? new StorageTreeFactory(); + _preBlockCache = preBlockCache; + _populatePreBlockCache = populatePreBlockCache; + _loadFromTree = LoadFromTreeStorage; + } + + public Hash256 StateRoot { get; set; } = null!; + private readonly bool _populatePreBlockCache; + + /// + /// Reset the storage state + /// + public override void Reset(bool resizeCollections = true) + { + base.Reset(); + _blockCache.Clear(); + _storages.Clear(); + _originalValues.Clear(); + _committedThisRound.Clear(); + _toUpdateRoots.Clear(); + } - public Hash256 StateRoot { get; set; } = null!; - private readonly bool _populatePreBlockCache; + /// + /// Get the current value at the specified location + /// + /// Storage location + /// Value at location + protected override ReadOnlySpan GetCurrentValue(in StorageCell storageCell) => + TryGetCachedValue(storageCell, out byte[]? bytes) ? bytes! 
: LoadFromTree(storageCell); - /// - /// Reset the storage state - /// - public override void Reset(bool resizeCollections = true) + /// + /// Return the original persistent storage value from the storage cell + /// + /// + /// + public byte[] GetOriginal(in StorageCell storageCell) + { + if (!_originalValues.TryGetValue(storageCell, out var value)) { - base.Reset(); - _blockCache.Clear(); - _storages.Clear(); - _originalValues.Clear(); - _committedThisRound.Clear(); - _toUpdateRoots.Clear(); + throw new InvalidOperationException("Get original should only be called after get within the same caching round"); } - /// - /// Get the current value at the specified location - /// - /// Storage location - /// Value at location - protected override ReadOnlySpan GetCurrentValue(in StorageCell storageCell) => - TryGetCachedValue(storageCell, out byte[]? bytes) ? bytes! : LoadFromTree(storageCell); - - /// - /// Return the original persistent storage value from the storage cell - /// - /// - /// - public byte[] GetOriginal(in StorageCell storageCell) + if (_transactionChangesSnapshots.TryPeek(out int snapshot)) { - if (!_originalValues.TryGetValue(storageCell, out var value)) + if (_intraBlockCache.TryGetValue(storageCell, out StackList stack)) { - throw new InvalidOperationException("Get original should only be called after get within the same caching round"); - } - - if (_transactionChangesSnapshots.TryPeek(out int snapshot)) - { - if (_intraBlockCache.TryGetValue(storageCell, out StackList stack)) + if (stack.TryGetSearchedItem(snapshot, out int lastChangeIndexBeforeOriginalSnapshot)) { - if (stack.TryGetSearchedItem(snapshot, out int lastChangeIndexBeforeOriginalSnapshot)) - { - return _changes[lastChangeIndexBeforeOriginalSnapshot].Value; - } + return _changes[lastChangeIndexBeforeOriginalSnapshot].Value; } } - - return value; } - private HashSet? _tempToUpdateRoots; - /// - /// Called by Commit - /// Used for persistent storage specific logic - /// - /// Storage tracer - protected override void CommitCore(IStorageTracer tracer) + return value; + } + + private HashSet? _tempToUpdateRoots; + /// + /// Called by Commit + /// Used for persistent storage specific logic + /// + /// Storage tracer + protected override void CommitCore(IStorageTracer tracer) + { + if (_logger.IsTrace) _logger.Trace("Committing storage changes"); + + int currentPosition = _changes.Count - 1; + if (currentPosition < 0) + { + return; + } + if (_changes[currentPosition].IsNull) { - if (_logger.IsTrace) _logger.Trace("Committing storage changes"); + throw new InvalidOperationException($"Change at current position {currentPosition} was null when committing {nameof(PartialStorageProviderBase)}"); + } - int currentPosition = _changes.Count - 1; - if (currentPosition < 0) - { - return; - } - if (_changes[currentPosition].IsNull) - { - throw new InvalidOperationException($"Change at current position {currentPosition} was null when committing {nameof(PartialStorageProviderBase)}"); - } + HashSet toUpdateRoots = (_tempToUpdateRoots ??= new()); - HashSet toUpdateRoots = (_tempToUpdateRoots ??= new()); + bool isTracing = tracer.IsTracingStorage; + Dictionary? trace = null; + if (isTracing) + { + trace = new Dictionary(); + } - bool isTracing = tracer.IsTracingStorage; - Dictionary? 
trace = null; - if (isTracing) + for (int i = 0; i <= currentPosition; i++) + { + Change change = _changes[currentPosition - i]; + if (!isTracing && change!.ChangeType == ChangeType.JustCache) { - trace = new Dictionary(); + continue; } - for (int i = 0; i <= currentPosition; i++) + if (_committedThisRound.Contains(change!.StorageCell)) { - Change change = _changes[currentPosition - i]; - if (!isTracing && change!.ChangeType == ChangeType.JustCache) - { - continue; - } - - if (_committedThisRound.Contains(change!.StorageCell)) - { - if (isTracing && change.ChangeType == ChangeType.JustCache) - { - trace![change.StorageCell] = new ChangeTrace(change.Value, trace[change.StorageCell].After); - } - - continue; - } - if (isTracing && change.ChangeType == ChangeType.JustCache) { - tracer!.ReportStorageRead(change.StorageCell); + trace![change.StorageCell] = new ChangeTrace(change.Value, trace[change.StorageCell].After); } - _committedThisRound.Add(change.StorageCell); - - if (change.ChangeType == ChangeType.Destroy) - { - continue; - } - - int forAssertion = _intraBlockCache[change.StorageCell].Pop(); - if (forAssertion != currentPosition - i) - { - throw new InvalidOperationException($"Expected checked value {forAssertion} to be equal to {currentPosition} - {i}"); - } - - switch (change.ChangeType) - { - case ChangeType.Destroy: - break; - case ChangeType.JustCache: - break; - case ChangeType.Update: - if (_logger.IsTrace) - { - _logger.Trace($" Update {change.StorageCell.Address}_{change.StorageCell.Index} V = {change.Value.ToHexString(true)}"); - } - - SaveToTree(toUpdateRoots, change); - - if (isTracing) - { - trace![change.StorageCell] = new ChangeTrace(change.Value); - } - - break; - default: - throw new ArgumentOutOfRangeException(); - } + continue; } - foreach (AddressAsKey address in toUpdateRoots) + if (isTracing && change.ChangeType == ChangeType.JustCache) { - // since the accounts could be empty accounts that are removing (EIP-158) - if (_stateProvider.AccountExists(address)) - { - _toUpdateRoots.Add(address); - } - else - { - _toUpdateRoots.Remove(address); - _storages.Remove(address); - } + tracer!.ReportStorageRead(change.StorageCell); } - toUpdateRoots.Clear(); - base.CommitCore(tracer); - _originalValues.Clear(); - _committedThisRound.Clear(); + _committedThisRound.Add(change.StorageCell); - if (isTracing) + if (change.ChangeType == ChangeType.Destroy) { - ReportChanges(tracer!, trace!); + continue; } - } - protected override void CommitStorageRoots() - { - if (_toUpdateRoots.Count == 0) + int forAssertion = _intraBlockCache[change.StorageCell].Pop(); + if (forAssertion != currentPosition - i) { - return; + throw new InvalidOperationException($"Expected checked value {forAssertion} to be equal to {currentPosition} - {i}"); } - // Is overhead of parallel foreach worth it? 
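        // (With four or fewer dirty storage tries the roots are recalculated sequentially below,
        // since the setup cost of Parallel.ForEach would likely outweigh the work saved; the
        // cutoff of 4 is a heuristic, not a measured constant.)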
- if (_toUpdateRoots.Count <= 4) - { - UpdateRootHashesSingleThread(); - } - else + switch (change.ChangeType) { - UpdateRootHashesMultiThread(); - } - - void UpdateRootHashesSingleThread() - { - foreach (KeyValuePair kvp in _storages) - { - if (!_toUpdateRoots.Contains(kvp.Key)) + case ChangeType.Destroy: + break; + case ChangeType.JustCache: + break; + case ChangeType.Update: + if (_logger.IsTrace) { - // Wasn't updated don't recalculate - continue; + _logger.Trace($" Update {change.StorageCell.Address}_{change.StorageCell.Index} V = {change.Value.ToHexString(true)}"); } - StorageTree storageTree = kvp.Value; - storageTree.UpdateRootHash(canBeParallel: true); - _stateProvider.UpdateStorageRoot(address: kvp.Key, storageTree.RootHash); - } - } + SaveToTree(toUpdateRoots, change); - void UpdateRootHashesMultiThread() - { - // We can recalculate the roots in parallel as they are all independent tries - Parallel.ForEach(_storages, RuntimeInformation.ParallelOptionsLogicalCores, kvp => - { - if (!_toUpdateRoots.Contains(kvp.Key)) + if (isTracing) { - // Wasn't updated don't recalculate - return; + trace![change.StorageCell] = new ChangeTrace(change.Value); } - StorageTree storageTree = kvp.Value; - storageTree.UpdateRootHash(canBeParallel: false); - }); - - // Update the storage roots in the main thread non in parallel - foreach (KeyValuePair kvp in _storages) - { - if (!_toUpdateRoots.Contains(kvp.Key)) - { - continue; - } - - // Update the storage root for the Account - _stateProvider.UpdateStorageRoot(address: kvp.Key, kvp.Value.RootHash); - } + break; + default: + throw new ArgumentOutOfRangeException(); } } - private void SaveToTree(HashSet toUpdateRoots, Change change) + foreach (AddressAsKey address in toUpdateRoots) { - if (_originalValues.TryGetValue(change.StorageCell, out byte[] initialValue) && - initialValue.AsSpan().SequenceEqual(change.Value)) + // since the accounts could be empty accounts that are removing (EIP-158) + if (_stateProvider.AccountExists(address)) { - // no need to update the tree if the value is the same - return; + _toUpdateRoots.Add(address); } - - StorageTree tree = GetOrCreateStorage(change.StorageCell.Address); - Db.Metrics.StorageTreeWrites++; - toUpdateRoots.Add(change.StorageCell.Address); - tree.Set(change.StorageCell.Index, change.Value); - - ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, change.StorageCell.Address, out bool exists); - if (!exists) + else { - dict = new SelfDestructDictionary(StorageTree.EmptyBytes); + _toUpdateRoots.Remove(address); + _storages.Remove(address); } + } + toUpdateRoots.Clear(); + + base.CommitCore(tracer); + _originalValues.Clear(); + _committedThisRound.Clear(); - dict[change.StorageCell.Index] = change.Value; + if (isTracing) + { + ReportChanges(tracer!, trace!); + } + } + + protected override void CommitStorageRoots() + { + if (_toUpdateRoots.Count == 0) + { + return; } - /// - /// Commit persistent storage trees - /// - /// Current block number - public void CommitTrees(IBlockCommitter blockCommitter) + // Is overhead of parallel foreach worth it? + if (_toUpdateRoots.Count <= 4) { - // Note: These all runs in about 0.4ms. So the little overhead like attempting to sort the tasks - // may make it worst. Always check on mainnet. 
+ UpdateRootHashesSingleThread(); + } + else + { + UpdateRootHashesMultiThread(); + } - using ArrayPoolList commitTask = new ArrayPoolList(_storages.Count); - foreach (KeyValuePair storage in _storages) + void UpdateRootHashesSingleThread() + { + foreach (KeyValuePair kvp in _storages) { - if (!_toUpdateRoots.Contains(storage.Key)) + if (!_toUpdateRoots.Contains(kvp.Key)) { + // Wasn't updated don't recalculate continue; } - if (blockCommitter.TryRequestConcurrencyQuota()) + StorageTree storageTree = kvp.Value; + storageTree.UpdateRootHash(canBeParallel: true); + _stateProvider.UpdateStorageRoot(address: kvp.Key, storageTree.RootHash); + } + } + + void UpdateRootHashesMultiThread() + { + // We can recalculate the roots in parallel as they are all independent tries + Parallel.ForEach(_storages, RuntimeInformation.ParallelOptionsLogicalCores, kvp => + { + if (!_toUpdateRoots.Contains(kvp.Key)) { - commitTask.Add(Task.Factory.StartNew((ctx) => - { - StorageTree st = (StorageTree)ctx; - st.Commit(); - blockCommitter.ReturnConcurrencyQuota(); - }, storage.Value)); + // Wasn't updated don't recalculate + return; } - else + StorageTree storageTree = kvp.Value; + storageTree.UpdateRootHash(canBeParallel: false); + }); + + // Update the storage roots in the main thread non in parallel + foreach (KeyValuePair kvp in _storages) + { + if (!_toUpdateRoots.Contains(kvp.Key)) { - storage.Value.Commit(); + continue; } - } - Task.WaitAll(commitTask.AsSpan()); + // Update the storage root for the Account + _stateProvider.UpdateStorageRoot(address: kvp.Key, kvp.Value.RootHash); + } - _toUpdateRoots.Clear(); - // only needed here as there is no control over cached storage size otherwise - _storages.Clear(); } + } - private StorageTree GetOrCreateStorage(Address address) + private void SaveToTree(HashSet toUpdateRoots, Change change) + { + if (_originalValues.TryGetValue(change.StorageCell, out byte[] initialValue) && + initialValue.AsSpan().SequenceEqual(change.Value)) { - ref StorageTree? value = ref CollectionsMarshal.GetValueRefOrAddDefault(_storages, address, out bool exists); - if (!exists) - { - value = _storageTreeFactory.Create(address, _trieStore.GetTrieStore(address.ToAccountPath), _stateProvider.GetStorageRoot(address), StateRoot, _logManager); - } - - return value; + // no need to update the tree if the value is the same + return; } - public void WarmUp(in StorageCell storageCell, bool isEmpty) + StorageTree tree = GetOrCreateStorage(change.StorageCell.Address); + Db.Metrics.StorageTreeWrites++; + toUpdateRoots.Add(change.StorageCell.Address); + tree.Set(change.StorageCell.Index, change.Value); + + ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, change.StorageCell.Address, out bool exists); + if (!exists) { - if (isEmpty) - { - if (_preBlockCache is not null) - { - _preBlockCache[storageCell] = []; - } - } - else - { - LoadFromTree(in storageCell); - } + dict = new SelfDestructDictionary(StorageTree.EmptyBytes); } - private ReadOnlySpan LoadFromTree(in StorageCell storageCell) + dict[change.StorageCell.Index] = change.Value; + } + + /// + /// Commit persistent storage trees + /// + /// Current block number + public void CommitTrees(IBlockCommitter blockCommitter) + { + // Note: These all runs in about 0.4ms. So the little overhead like attempting to sort the tasks + // may make it worst. Always check on mainnet. 
+ + using ArrayPoolList commitTask = new ArrayPoolList(_storages.Count); + foreach (KeyValuePair storage in _storages) { - ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, storageCell.Address, out bool exists); - if (!exists) + if (!_toUpdateRoots.Contains(storage.Key)) { - dict = new SelfDestructDictionary(StorageTree.EmptyBytes); + continue; } - ref byte[]? value = ref dict.GetValueRefOrAddDefault(storageCell.Index, out exists); - if (!exists) + if (blockCommitter.TryRequestConcurrencyQuota()) { - value = !_populatePreBlockCache ? - LoadFromTreeReadPreWarmCache(in storageCell) : - LoadFromTreePopulatePrewarmCache(in storageCell); + commitTask.Add(Task.Factory.StartNew((ctx) => + { + StorageTree st = (StorageTree)ctx; + st.Commit(); + blockCommitter.ReturnConcurrencyQuota(); + }, storage.Value)); } else { - Db.Metrics.IncrementStorageTreeCache(); + storage.Value.Commit(); } - - if (!storageCell.IsHash) PushToRegistryOnly(storageCell, value); - return value; } - private byte[] LoadFromTreePopulatePrewarmCache(in StorageCell storageCell) - { - long priorReads = Db.Metrics.ThreadLocalStorageTreeReads; + Task.WaitAll(commitTask.AsSpan()); - byte[] value = _preBlockCache is not null - ? _preBlockCache.GetOrAdd(storageCell, _loadFromTree) - : _loadFromTree(storageCell); + _toUpdateRoots.Clear(); + // only needed here as there is no control over cached storage size otherwise + _storages.Clear(); + } - if (Db.Metrics.ThreadLocalStorageTreeReads == priorReads) - { - // Read from Concurrent Cache - Db.Metrics.IncrementStorageTreeCache(); - } - return value; + private StorageTree GetOrCreateStorage(Address address) + { + ref StorageTree? value = ref CollectionsMarshal.GetValueRefOrAddDefault(_storages, address, out bool exists); + if (!exists) + { + value = _storageTreeFactory.Create(address, _trieStore.GetTrieStore(address.ToAccountPath), _stateProvider.GetStorageRoot(address), StateRoot, _logManager); } - private byte[] LoadFromTreeReadPreWarmCache(in StorageCell storageCell) + return value; + } + + public void WarmUp(in StorageCell storageCell, bool isEmpty) + { + if (isEmpty) { - if (_preBlockCache?.TryGetValue(storageCell, out byte[] value) ?? false) + if (_preBlockCache is not null) { - Db.Metrics.IncrementStorageTreeCache(); + _preBlockCache[storageCell] = []; } - else - { - value = _loadFromTree(storageCell); - } - return value; } - - private byte[] LoadFromTreeStorage(StorageCell storageCell) + else { - StorageTree tree = GetOrCreateStorage(storageCell.Address); - Db.Metrics.IncrementStorageTreeReads(); - return !storageCell.IsHash ? tree.Get(storageCell.Index) : tree.GetArray(storageCell.Hash.Bytes); + LoadFromTree(in storageCell); } + } - private void PushToRegistryOnly(in StorageCell cell, byte[] value) + private ReadOnlySpan LoadFromTree(in StorageCell storageCell) + { + ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, storageCell.Address, out bool exists); + if (!exists) { - StackList stack = SetupRegistry(cell); - _originalValues[cell] = value; - stack.Push(_changes.Count); - _changes.Add(new Change(ChangeType.JustCache, cell, value)); + dict = new SelfDestructDictionary(StorageTree.EmptyBytes); } - private static void ReportChanges(IStorageTracer tracer, Dictionary trace) + ref byte[]? 
+        if (!exists)
     {
-        foreach ((StorageCell address, ChangeTrace change) in trace)
-        {
-            byte[] before = change.Before;
-            byte[] after = change.After;
+            value = !_populatePreBlockCache ?
+                LoadFromTreeReadPreWarmCache(in storageCell) :
+                LoadFromTreePopulatePrewarmCache(in storageCell);
+        }
+        else
+        {
+            Db.Metrics.IncrementStorageTreeCache();
+        }
 
-            if (!Bytes.AreEqual(before, after))
-            {
-                tracer.ReportStorageChange(address, before, after);
-            }
-        }
+        if (!storageCell.IsHash) PushToRegistryOnly(storageCell, value);
+        return value;
+    }
+
+    private byte[] LoadFromTreePopulatePrewarmCache(in StorageCell storageCell)
+    {
+        long priorReads = Db.Metrics.ThreadLocalStorageTreeReads;
+
+        byte[] value = _preBlockCache is not null
+            ? _preBlockCache.GetOrAdd(storageCell, _loadFromTree)
+            : _loadFromTree(storageCell);
+
+        if (Db.Metrics.ThreadLocalStorageTreeReads == priorReads)
+        {
+            // Read from Concurrent Cache
+            Db.Metrics.IncrementStorageTreeCache();
     }
+        return value;
+    }
 
-    private Hash256 RecalculateRootHash(Address address)
+    private byte[] LoadFromTreeReadPreWarmCache(in StorageCell storageCell)
+    {
+        if (_preBlockCache?.TryGetValue(storageCell, out byte[] value) ?? false)
+        {
+            Db.Metrics.IncrementStorageTreeCache();
+        }
+        else
     {
-        StorageTree storageTree = GetOrCreateStorage(address);
-        storageTree.UpdateRootHash();
-        return storageTree.RootHash;
+        value = _loadFromTree(storageCell);
     }
+        return value;
+    }
 
-    /// <summary>
-    /// Clear all storage at specified address
-    /// </summary>
-    /// <param name="address">Contract address</param>
-    public override void ClearStorage(Address address)
+    private byte[] LoadFromTreeStorage(StorageCell storageCell)
+    {
+        StorageTree tree = GetOrCreateStorage(storageCell.Address);
+        Db.Metrics.IncrementStorageTreeReads();
+        return !storageCell.IsHash ? tree.Get(storageCell.Index) : tree.GetArray(storageCell.Hash.Bytes);
+    }
+
+    private void PushToRegistryOnly(in StorageCell cell, byte[] value)
+    {
+        StackList<int> stack = SetupRegistry(cell);
+        _originalValues[cell] = value;
+        stack.Push(_changes.Count);
+        _changes.Add(new Change(ChangeType.JustCache, cell, value));
+    }
+
+    private static void ReportChanges(IStorageTracer tracer, Dictionary<StorageCell, ChangeTrace> trace)
+    {
+        foreach ((StorageCell address, ChangeTrace change) in trace)
     {
-        base.ClearStorage(address);
+            byte[] before = change.Before;
+            byte[] after = change.After;
 
-        ref SelfDestructDictionary<byte[]>? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, address, out bool exists);
-        if (!exists)
+            if (!Bytes.AreEqual(before, after))
             {
-            dict = new SelfDestructDictionary<byte[]>(StorageTree.EmptyBytes);
+                tracer.ReportStorageChange(address, before, after);
             }
+        }
+    }
 
-        dict.SelfDestruct();
+    private Hash256 RecalculateRootHash(Address address)
+    {
+        StorageTree storageTree = GetOrCreateStorage(address);
+        storageTree.UpdateRootHash();
+        return storageTree.RootHash;
+    }
 
-        // here it is important to make sure that we will not reuse the same tree when the contract is revived
-        // by means of CREATE 2 - notice that the cached trie may carry information about items that were not
-        // touched in this block, hence were not zeroed above
-        // TODO: how does it work with pruning?
-        _toUpdateRoots.Remove(address);
-        _storages[address] = new StorageTree(_trieStore.GetTrieStore(address.ToAccountPath), Keccak.EmptyTreeHash, _logManager);
-    }
+    /// <summary>
+    /// Clear all storage at specified address
+    /// </summary>
+    /// <param name="address">Contract address</param>
+    public override void ClearStorage(Address address)
+    {
+        base.ClearStorage(address);
 
-    private class StorageTreeFactory : IStorageTreeFactory
+        ref SelfDestructDictionary<byte[]>? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, address, out bool exists);
+        if (!exists)
     {
-        public StorageTree Create(Address address, IScopedTrieStore trieStore, Hash256 storageRoot, Hash256 stateRoot, ILogManager? logManager)
-            => new(trieStore, storageRoot, logManager);
+            dict = new SelfDestructDictionary<byte[]>(StorageTree.EmptyBytes);
     }
 
-    private sealed class SelfDestructDictionary<TValue>(TValue destructedValue)
-    {
-        private bool _selfDestruct;
-        private readonly Dictionary<UInt256, TValue> _dictionary = new(Comparer.Instance);
+        dict.SelfDestruct();
 
-        public void SelfDestruct()
-        {
-            _selfDestruct = true;
-            _dictionary.Clear();
-        }
+        // here it is important to make sure that we will not reuse the same tree when the contract is revived
+        // by means of CREATE 2 - notice that the cached trie may carry information about items that were not
+        // touched in this block, hence were not zeroed above
+        // TODO: how does it work with pruning?
+        _toUpdateRoots.Remove(address);
+        _storages[address] = new StorageTree(_trieStore.GetTrieStore(address.ToAccountPath), Keccak.EmptyTreeHash, _logManager);
+    }
 
-        public ref TValue? GetValueRefOrAddDefault(UInt256 storageCellIndex, out bool exists)
-        {
-            ref TValue value = ref CollectionsMarshal.GetValueRefOrAddDefault(_dictionary, storageCellIndex, out exists);
-            if (!exists && _selfDestruct)
-            {
-                value = destructedValue;
-                exists = true;
-            }
-            return ref value;
-        }
+    private class StorageTreeFactory : IStorageTreeFactory
+    {
+        public StorageTree Create(Address address, IScopedTrieStore trieStore, Hash256 storageRoot, Hash256 stateRoot, ILogManager? logManager)
+            => new(trieStore, storageRoot, logManager);
+    }
 
-        public TValue? this[UInt256 key]
+    private sealed class SelfDestructDictionary<TValue>(TValue destructedValue)
+    {
+        private bool _selfDestruct;
+        private readonly Dictionary<UInt256, TValue> _dictionary = new(Comparer.Instance);
+
+        public void SelfDestruct()
+        {
+            _selfDestruct = true;
+            _dictionary.Clear();
+        }
+
+        public ref TValue? GetValueRefOrAddDefault(UInt256 storageCellIndex, out bool exists)
+        {
+            ref TValue value = ref CollectionsMarshal.GetValueRefOrAddDefault(_dictionary, storageCellIndex, out exists);
+            if (!exists && _selfDestruct)
             {
-            set => _dictionary[key] = value;
+                value = destructedValue;
+                exists = true;
             }
+            return ref value;
+        }
 
-        private sealed class Comparer : IEqualityComparer<UInt256>
-        {
-            public static Comparer Instance { get; } = new();
+        public TValue? this[UInt256 key]
+        {
+            set => _dictionary[key] = value;
+        }
 
-            private Comparer() { }
+        private sealed class Comparer : IEqualityComparer<UInt256>
+        {
+            public static Comparer Instance { get; } = new();
 
-            public bool Equals(UInt256 x, UInt256 y)
-                => Unsafe.As<UInt256, Vector256<ulong>>(ref x) == Unsafe.As<UInt256, Vector256<ulong>>(ref y);
+            private Comparer() { }
 
-            public int GetHashCode([DisallowNull] UInt256 obj)
-                => MemoryMarshal.AsBytes(MemoryMarshal.CreateReadOnlySpan(in obj, 1)).FastHash();
-        }
+            public bool Equals(UInt256 x, UInt256 y)
+                => Unsafe.As<UInt256, Vector256<ulong>>(ref x) == Unsafe.As<UInt256, Vector256<ulong>>(ref y);
+
+            public int GetHashCode([DisallowNull] UInt256 obj)
+                => MemoryMarshal.AsBytes(MemoryMarshal.CreateReadOnlySpan(in obj, 1)).FastHash();
         }
     }
 }
diff --git a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs.orig b/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs.orig
deleted file mode 100644
index 3a6d6af0af3..00000000000
--- a/src/Nethermind/Nethermind.State/PersistentStorageProvider.cs.orig
+++ /dev/null
@@ -1,595 +0,0 @@
-// SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited
-// SPDX-License-Identifier: LGPL-3.0-only
-
-using System;
-using System.Collections.Concurrent;
-using System.Collections.Generic;
-using System.Diagnostics.CodeAnalysis;
-using System.Linq;
-using System.Runtime.CompilerServices;
-using System.Runtime.InteropServices;
-using System.Runtime.Intrinsics;
-using System.Threading.Tasks;
-using Nethermind.Core;
-using Nethermind.Core.Collections;
-using Nethermind.Core.Crypto;
-using Nethermind.Core.Extensions;
-using Nethermind.Int256;
-using Nethermind.Logging;
-using Nethermind.State.Tracing;
-using Nethermind.Trie.Pruning;
-
-namespace Nethermind.State;
-
-using Nethermind.Core.Cpu;
-/// <summary>
-/// Manages persistent storage allowing for snapshotting and restoring
-/// Persists data to ITrieStore
-/// </summary>
-internal sealed class PersistentStorageProvider : PartialStorageProviderBase
-{
-    private readonly ITrieStore _trieStore;
-    private readonly StateProvider _stateProvider;
-    private readonly ILogManager? _logManager;
-    internal readonly IStorageTreeFactory _storageTreeFactory;
-    private readonly Dictionary<AddressAsKey, StorageTree> _storages = new();
-    private readonly HashSet<AddressAsKey> _toUpdateRoots = new();
-
-    /// <summary>
-    /// EIP-1283
-    /// </summary>
-    private readonly Dictionary<StorageCell, byte[]> _originalValues = new();
-
-    private readonly HashSet<StorageCell> _committedThisRound = new();
-    private readonly Dictionary<AddressAsKey, SelfDestructDictionary<byte[]>> _blockCache = new(4_096);
-    private readonly ConcurrentDictionary<StorageCell, byte[]>? _preBlockCache;
-    private readonly Func<StorageCell, byte[]> _loadFromTree;
-
-    /// <summary>
-    /// Manages persistent storage allowing for snapshotting and restoring
-    /// Persists data to ITrieStore
-    /// </summary>
-    public PersistentStorageProvider(ITrieStore trieStore,
-        StateProvider stateProvider,
-        ILogManager logManager,
-        IStorageTreeFactory? storageTreeFactory,
-        ConcurrentDictionary<StorageCell, byte[]>? preBlockCache,
-        bool populatePreBlockCache) : base(logManager)
-    {
-        _trieStore = trieStore ?? throw new ArgumentNullException(nameof(trieStore));
-        _stateProvider = stateProvider ?? throw new ArgumentNullException(nameof(stateProvider));
-        _logManager = logManager ?? throw new ArgumentNullException(nameof(logManager));
-        _storageTreeFactory = storageTreeFactory ??
new StorageTreeFactory(); - _preBlockCache = preBlockCache; - _populatePreBlockCache = populatePreBlockCache; - _loadFromTree = LoadFromTreeStorage; - } - - public Hash256 StateRoot { get; set; } = null!; - private readonly bool _populatePreBlockCache; - - /// - /// Reset the storage state - /// - public override void Reset(bool resizeCollections = true) - { - base.Reset(); - _blockCache.Clear(); - _storages.Clear(); - _originalValues.Clear(); - _committedThisRound.Clear(); - _toUpdateRoots.Clear(); - } - - /// - /// Get the current value at the specified location - /// - /// Storage location - /// Value at location - protected override ReadOnlySpan GetCurrentValue(in StorageCell storageCell) => - TryGetCachedValue(storageCell, out byte[]? bytes) ? bytes! : LoadFromTree(storageCell); - - /// - /// Return the original persistent storage value from the storage cell - /// - /// - /// - public byte[] GetOriginal(in StorageCell storageCell) - { - if (!_originalValues.TryGetValue(storageCell, out var value)) - { - throw new InvalidOperationException("Get original should only be called after get within the same caching round"); - } - - if (_transactionChangesSnapshots.TryPeek(out int snapshot)) - { - if (_intraBlockCache.TryGetValue(storageCell, out StackList stack)) - { - if (stack.TryGetSearchedItem(snapshot, out int lastChangeIndexBeforeOriginalSnapshot)) - { - return _changes[lastChangeIndexBeforeOriginalSnapshot].Value; - } - } - } - - return value; - } - - private HashSet? _tempToUpdateRoots; - /// - /// Called by Commit - /// Used for persistent storage specific logic - /// - /// Storage tracer - protected override void CommitCore(IStorageTracer tracer) - { - if (_logger.IsTrace) _logger.Trace("Committing storage changes"); - - int currentPosition = _changes.Count - 1; - if (currentPosition < 0) - { - return; - } - if (_changes[currentPosition].IsNull) - { - throw new InvalidOperationException($"Change at current position {currentPosition} was null when committing {nameof(PartialStorageProviderBase)}"); - } - - HashSet toUpdateRoots = (_tempToUpdateRoots ??= new()); - - bool isTracing = tracer.IsTracingStorage; - Dictionary? 
trace = null; - if (isTracing) - { - trace = new Dictionary(); - } - - for (int i = 0; i <= currentPosition; i++) - { - Change change = _changes[currentPosition - i]; - if (!isTracing && change!.ChangeType == ChangeType.JustCache) - { - continue; - } - - if (_committedThisRound.Contains(change!.StorageCell)) - { - if (isTracing && change.ChangeType == ChangeType.JustCache) - { - trace![change.StorageCell] = new ChangeTrace(change.Value, trace[change.StorageCell].After); - } - - continue; - } - - if (isTracing && change.ChangeType == ChangeType.JustCache) - { - tracer!.ReportStorageRead(change.StorageCell); - } - - _committedThisRound.Add(change.StorageCell); - - if (change.ChangeType == ChangeType.Destroy) - { - continue; - } - - int forAssertion = _intraBlockCache[change.StorageCell].Pop(); - if (forAssertion != currentPosition - i) - { - throw new InvalidOperationException($"Expected checked value {forAssertion} to be equal to {currentPosition} - {i}"); - } - - switch (change.ChangeType) - { - case ChangeType.Destroy: - break; - case ChangeType.JustCache: - break; - case ChangeType.Update: - if (_logger.IsTrace) - { - _logger.Trace($" Update {change.StorageCell.Address}_{change.StorageCell.Index} V = {change.Value.ToHexString(true)}"); - } - - SaveToTree(toUpdateRoots, change); - - if (isTracing) - { - trace![change.StorageCell] = new ChangeTrace(change.Value); - } - - break; - default: - throw new ArgumentOutOfRangeException(); - } - } - - foreach (AddressAsKey address in toUpdateRoots) - { - // since the accounts could be empty accounts that are removing (EIP-158) - if (_stateProvider.AccountExists(address)) - { - _toUpdateRoots.Add(address); - } - else - { - _toUpdateRoots.Remove(address); - _storages.Remove(address); - } - } - toUpdateRoots.Clear(); - - base.CommitCore(tracer); - _originalValues.Clear(); - _committedThisRound.Clear(); - - if (isTracing) - { - ReportChanges(tracer!, trace!); - } - } - - protected override void CommitStorageRoots() - { - if (_toUpdateRoots.Count == 0) - { - return; - } - - // Is overhead of parallel foreach worth it? 
- if (_toUpdateRoots.Count <= 4) - { - UpdateRootHashesSingleThread(); - } - else - { - UpdateRootHashesMultiThread(); - } - - void UpdateRootHashesSingleThread() - { - foreach (KeyValuePair kvp in _storages) - { - if (!_toUpdateRoots.Contains(kvp.Key)) - { - // Wasn't updated don't recalculate - continue; - } - - StorageTree storageTree = kvp.Value; - storageTree.UpdateRootHash(canBeParallel: true); - _stateProvider.UpdateStorageRoot(address: kvp.Key, storageTree.RootHash); - } - } - - void UpdateRootHashesMultiThread() - { - // We can recalculate the roots in parallel as they are all independent tries - Parallel.ForEach(_storages, RuntimeInformation.ParallelOptionsLogicalCores, kvp => - { - if (!_toUpdateRoots.Contains(kvp.Key)) - { - // Wasn't updated don't recalculate - return; - } - StorageTree storageTree = kvp.Value; - storageTree.UpdateRootHash(canBeParallel: false); - }); - - // Update the storage roots in the main thread non in parallel - foreach (KeyValuePair kvp in _storages) - { - if (!_toUpdateRoots.Contains(kvp.Key)) - { - continue; - } - - // Update the storage root for the Account - _stateProvider.UpdateStorageRoot(address: kvp.Key, kvp.Value.RootHash); - } - - } - } - - private void SaveToTree(HashSet toUpdateRoots, Change change) - { - if (_originalValues.TryGetValue(change.StorageCell, out byte[] initialValue) && - initialValue.AsSpan().SequenceEqual(change.Value)) - { - // no need to update the tree if the value is the same - return; - } - - StorageTree tree = GetOrCreateStorage(change.StorageCell.Address); - Db.Metrics.StorageTreeWrites++; - toUpdateRoots.Add(change.StorageCell.Address); - tree.Set(change.StorageCell.Index, change.Value); - - ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, change.StorageCell.Address, out bool exists); - if (!exists) - { - dict = new SelfDestructDictionary(StorageTree.EmptyBytes); - } - - dict[change.StorageCell.Index] = change.Value; - } - - /// - /// Commit persistent storage trees - /// - /// Current block number - public void CommitTrees(IBlockCommitter blockCommitter) - { - // Note: These all runs in about 0.4ms. So the little overhead like attempting to sort the tasks - // may make it worst. Always check on mainnet. - - using ArrayPoolList commitTask = new ArrayPoolList(_storages.Count); - foreach (KeyValuePair storage in _storages) - { - if (!_toUpdateRoots.Contains(storage.Key)) - { - continue; - } - - if (blockCommitter.TryRequestConcurrencyQuota()) - { - commitTask.Add(Task.Factory.StartNew((ctx) => - { - StorageTree st = (StorageTree)ctx; - st.Commit(); - blockCommitter.ReturnConcurrencyQuota(); - }, storage.Value)); - } - else - { - storage.Value.Commit(); - } - } - - Task.WaitAll(commitTask); - - _toUpdateRoots.Clear(); - // only needed here as there is no control over cached storage size otherwise - _storages.Clear(); - } - - private StorageTree GetOrCreateStorage(Address address) - { - ref StorageTree? 
value = ref CollectionsMarshal.GetValueRefOrAddDefault(_storages, address, out bool exists); - if (!exists) - { - value = _storageTreeFactory.Create(address, _trieStore.GetTrieStore(address.ToAccountPath), _stateProvider.GetStorageRoot(address), StateRoot, _logManager); - } - - return value; - } - - public void WarmUp(in StorageCell storageCell, bool isEmpty) - { - if (isEmpty) - { - if (_preBlockCache is not null) - { -<<<<<<< HEAD - if (!_toUpdateRoots.Contains(storage.Key)) - { - continue; - } - - if (blockCommitter.TryRequestConcurrencyQuota()) - { - commitTask.Add(Task.Factory.StartNew((ctx) => - { - StorageTree st = (StorageTree)ctx; - st.Commit(); - blockCommitter.ReturnConcurrencyQuota(); - }, storage.Value)); - } - else - { - storage.Value.Commit(); - } - } - - Task.WaitAll(commitTask.AsSpan()); - - _toUpdateRoots.Clear(); - // only needed here as there is no control over cached storage size otherwise - _storages.Clear(); - } - - private StorageTree GetOrCreateStorage(Address address) - { - ref StorageTree? value = ref CollectionsMarshal.GetValueRefOrAddDefault(_storages, address, out bool exists); - if (!exists) - { - value = _storageTreeFactory.Create(address, _trieStore.GetTrieStore(address.ToAccountPath), _stateProvider.GetStorageRoot(address), StateRoot, _logManager); - } - - return value; - } - - public void WarmUp(in StorageCell storageCell, bool isEmpty) - { - if (isEmpty) - { - if (_preBlockCache is not null) - { - _preBlockCache[storageCell] = []; - } - } - else - { - LoadFromTree(in storageCell); -======= - _preBlockCache[storageCell] = []; ->>>>>>> origin/master - } - } - else - { - LoadFromTree(in storageCell); - } - } - - private ReadOnlySpan LoadFromTree(in StorageCell storageCell) - { - ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, storageCell.Address, out bool exists); - if (!exists) - { - dict = new SelfDestructDictionary(StorageTree.EmptyBytes); - } - - ref byte[]? value = ref dict.GetValueRefOrAddDefault(storageCell.Index, out exists); - if (!exists) - { - value = !_populatePreBlockCache ? - LoadFromTreeReadPreWarmCache(in storageCell) : - LoadFromTreePopulatePrewarmCache(in storageCell); - } - else - { - Db.Metrics.IncrementStorageTreeCache(); - } - - if (!storageCell.IsHash) PushToRegistryOnly(storageCell, value); - return value; - } - - private byte[] LoadFromTreePopulatePrewarmCache(in StorageCell storageCell) - { - long priorReads = Db.Metrics.ThreadLocalStorageTreeReads; - - byte[] value = _preBlockCache is not null - ? _preBlockCache.GetOrAdd(storageCell, _loadFromTree) - : _loadFromTree(storageCell); - - if (Db.Metrics.ThreadLocalStorageTreeReads == priorReads) - { - // Read from Concurrent Cache - Db.Metrics.IncrementStorageTreeCache(); - } - return value; - } - - private byte[] LoadFromTreeReadPreWarmCache(in StorageCell storageCell) - { - if (_preBlockCache?.TryGetValue(storageCell, out byte[] value) ?? false) - { - Db.Metrics.IncrementStorageTreeCache(); - } - else - { - value = _loadFromTree(storageCell); - } - return value; - } - - private byte[] LoadFromTreeStorage(StorageCell storageCell) - { - StorageTree tree = GetOrCreateStorage(storageCell.Address); - Db.Metrics.IncrementStorageTreeReads(); - return !storageCell.IsHash ? 
tree.Get(storageCell.Index) : tree.GetArray(storageCell.Hash.Bytes); - } - - private void PushToRegistryOnly(in StorageCell cell, byte[] value) - { - StackList stack = SetupRegistry(cell); - _originalValues[cell] = value; - stack.Push(_changes.Count); - _changes.Add(new Change(ChangeType.JustCache, cell, value)); - } - - private static void ReportChanges(IStorageTracer tracer, Dictionary trace) - { - foreach ((StorageCell address, ChangeTrace change) in trace) - { - byte[] before = change.Before; - byte[] after = change.After; - - if (!Bytes.AreEqual(before, after)) - { - tracer.ReportStorageChange(address, before, after); - } - } - } - - private Hash256 RecalculateRootHash(Address address) - { - StorageTree storageTree = GetOrCreateStorage(address); - storageTree.UpdateRootHash(); - return storageTree.RootHash; - } - - /// - /// Clear all storage at specified address - /// - /// Contract address - public override void ClearStorage(Address address) - { - base.ClearStorage(address); - - ref SelfDestructDictionary? dict = ref CollectionsMarshal.GetValueRefOrAddDefault(_blockCache, address, out bool exists); - if (!exists) - { - dict = new SelfDestructDictionary(StorageTree.EmptyBytes); - } - - dict.SelfDestruct(); - - // here it is important to make sure that we will not reuse the same tree when the contract is revived - // by means of CREATE 2 - notice that the cached trie may carry information about items that were not - // touched in this block, hence were not zeroed above - // TODO: how does it work with pruning? - _toUpdateRoots.Remove(address); - _storages[address] = new StorageTree(_trieStore.GetTrieStore(address.ToAccountPath), Keccak.EmptyTreeHash, _logManager); - } - - private class StorageTreeFactory : IStorageTreeFactory - { - public StorageTree Create(Address address, IScopedTrieStore trieStore, Hash256 storageRoot, Hash256 stateRoot, ILogManager? logManager) - => new(trieStore, storageRoot, logManager); - } - - private sealed class SelfDestructDictionary(TValue destructedValue) - { - private bool _selfDestruct; - private readonly Dictionary _dictionary = new(Comparer.Instance); - - public void SelfDestruct() - { - _selfDestruct = true; - _dictionary.Clear(); - } - - public ref TValue? GetValueRefOrAddDefault(UInt256 storageCellIndex, out bool exists) - { - ref TValue value = ref CollectionsMarshal.GetValueRefOrAddDefault(_dictionary, storageCellIndex, out exists); - if (!exists && _selfDestruct) - { - value = destructedValue; - exists = true; - } - return ref value; - } - - public TValue? 
this[UInt256 key] - { - set => _dictionary[key] = value; - } - - private sealed class Comparer : IEqualityComparer - { - public static Comparer Instance { get; } = new(); - - private Comparer() { } - - public bool Equals(UInt256 x, UInt256 y) - => Unsafe.As>(ref x) == Unsafe.As>(ref y); - - public int GetHashCode([DisallowNull] UInt256 obj) - => MemoryMarshal.AsBytes(MemoryMarshal.CreateReadOnlySpan(in obj, 1)).FastHash(); - } - } -} diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs.orig b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs.orig deleted file mode 100644 index d3fa63ff4f2..00000000000 --- a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs.orig +++ /dev/null @@ -1,1385 +0,0 @@ -// SPDX-FileCopyrightText: 2022 Demerzel Solutions Limited -// SPDX-License-Identifier: LGPL-3.0-only - -using System; -using System.Collections.Concurrent; -using System.Diagnostics; -using System.Diagnostics.CodeAnalysis; -using System.Linq; -using System.Runtime.CompilerServices; -using System.Threading; -using System.Threading.Tasks; -using Nethermind.Core; -using Nethermind.Core.Collections; -using Nethermind.Core.Crypto; -using Nethermind.Core.Extensions; -using Nethermind.Logging; - -namespace Nethermind.Trie.Pruning; - -/// -/// Trie store helps to manage trie commits block by block. -/// If persistence and pruning are needed they have a chance to execute their behaviour on commits. -/// -public class TrieStore : ITrieStore, IPruningTrieStore -{ - private const int ShardedDirtyNodeCount = 256; - - private int _isFirst; - - private readonly TrieStoreDirtyNodesCache[] _dirtyNodes = []; - private readonly Task[] _dirtyNodesTasks = []; - private readonly ConcurrentDictionary[] _persistedHashes = []; - private readonly Action _persistedNodeRecorder; - private readonly Task[] _disposeTasks = new Task[Environment.ProcessorCount]; - - // This seems to attempt prevent multiple block processing at the same time and along with pruning at the same time. - private readonly object _dirtyNodesLock = new object(); - - private readonly bool _livePruningEnabled = false; - - private bool _lastPersistedReachedReorgBoundary; - private Task _pruningTask = Task.CompletedTask; - private readonly CancellationTokenSource _pruningTaskCancellationTokenSource = new(); - - public TrieStore(IKeyValueStoreWithBatching? keyValueStore, ILogManager? logManager) - : this(keyValueStore, No.Pruning, Pruning.Persist.EveryBlock, logManager) - { - } - - public TrieStore(INodeStorage nodeStorage, ILogManager? logManager) - : this(nodeStorage, No.Pruning, Pruning.Persist.EveryBlock, logManager) - { - } - - public TrieStore( - IKeyValueStoreWithBatching? keyValueStore, - IPruningStrategy? pruningStrategy, - IPersistenceStrategy? persistenceStrategy, - ILogManager? logManager) : this(new NodeStorage(keyValueStore), pruningStrategy, persistenceStrategy, logManager) - { - } - - public TrieStore( - INodeStorage? nodeStorage, - IPruningStrategy? pruningStrategy, - IPersistenceStrategy? persistenceStrategy, - ILogManager? logManager) - { - _logger = logManager?.GetClassLogger() ?? throw new ArgumentNullException(nameof(logManager)); - _nodeStorage = nodeStorage ?? throw new ArgumentNullException(nameof(nodeStorage)); - _pruningStrategy = pruningStrategy ?? throw new ArgumentNullException(nameof(pruningStrategy)); - _persistenceStrategy = persistenceStrategy ?? 
throw new ArgumentNullException(nameof(persistenceStrategy)); - _publicStore = new TrieKeyValueStore(this); - _persistedNodeRecorder = PersistedNodeRecorder; - - if (pruningStrategy.PruningEnabled) - { - _dirtyNodes = new TrieStoreDirtyNodesCache[ShardedDirtyNodeCount]; - _dirtyNodesTasks = new Task[ShardedDirtyNodeCount]; - _persistedHashes = new ConcurrentDictionary[ShardedDirtyNodeCount]; - for (int i = 0; i < ShardedDirtyNodeCount; i++) - { - _dirtyNodes[i] = new TrieStoreDirtyNodesCache(this, _pruningStrategy.TrackedPastKeyCount / ShardedDirtyNodeCount, !_nodeStorage.RequirePath, _logger); - _persistedHashes[i] = new ConcurrentDictionary(); - } - } - - if (pruningStrategy.PruningEnabled && pruningStrategy.TrackedPastKeyCount > 0 && nodeStorage.RequirePath) - { - _livePruningEnabled = true; - } - } - - public IScopedTrieStore GetTrieStore(Hash256? address) => new ScopedTrieStore(this, address); - - public long LastPersistedBlockNumber - { - get => _latestPersistedBlockNumber; - private set - { - if (value != _latestPersistedBlockNumber) - { - Metrics.LastPersistedBlockNumber = value; - _latestPersistedBlockNumber = value; - _lastPersistedReachedReorgBoundary = false; - } - } - } - - public long MemoryUsedByDirtyCache - { - get => _memoryUsedByDirtyCache; - set - { - Metrics.MemoryUsedByCache = value; - _memoryUsedByDirtyCache = value; - } - } - - public void IncrementMemoryUsedByDirtyCache(long nodeMemoryUsage) - { - Metrics.MemoryUsedByCache = Interlocked.Add(ref _memoryUsedByDirtyCache, nodeMemoryUsage); - } - - public int CommittedNodesCount - { - get => _committedNodesCount; - private set - { - Metrics.CommittedNodesCount = value; - _committedNodesCount = value; - } - } - - private void IncrementCommittedNodesCount() - { - Metrics.CommittedNodesCount = Interlocked.Increment(ref _committedNodesCount); - } - - public int PersistedNodesCount - { - get => _persistedNodesCount; - private set - { - Metrics.PersistedNodeCount = value; - _persistedNodesCount = value; - } - } - - private void IncrementPersistedNodesCount() - { - Metrics.PersistedNodeCount = Interlocked.Increment(ref _persistedNodesCount); - } - - public int CachedNodesCount - { - get - { - int count = DirtyNodesCount(); - Metrics.CachedNodesCount = count; - return count; - } - } - - private void CommitAndInsertToDirtyNodes(long blockNumber, Hash256? 
address, ref TreePath path, in NodeCommitInfo nodeCommitInfo) - { - Debug.Assert(_pruningStrategy.PruningEnabled); - - if (_logger.IsTrace) Trace(blockNumber, in nodeCommitInfo); - if (!nodeCommitInfo.IsEmptyBlockMarker && !nodeCommitInfo.Node.IsBoundaryProofNode) - { - TrieNode node = nodeCommitInfo.Node; - - if (node.Keccak is null) - { - ThrowUnknownHash(node); - } - - if (node.LastSeen >= 0) - { - ThrowNodeHasBeenSeen(blockNumber, node); - } - - node = SaveOrReplaceInDirtyNodesCache(address, ref path, nodeCommitInfo, node); - node.LastSeen = Math.Max(blockNumber, node.LastSeen); - - IncrementCommittedNodesCount(); - } - - [MethodImpl(MethodImplOptions.NoInlining)] - void Trace(long blockNumber, in NodeCommitInfo nodeCommitInfo) - { - _logger.Trace($"Committing {nodeCommitInfo} at {blockNumber}"); - } - - [DoesNotReturn] - [StackTraceHidden] - static void ThrowUnknownHash(TrieNode node) => throw new TrieStoreException($"The hash of {node} should be known at the time of committing."); - - [DoesNotReturn] - [StackTraceHidden] - static void ThrowNodeHasBeenSeen(long blockNumber, TrieNode node) => throw new TrieStoreException($"{nameof(TrieNode.LastSeen)} set on {node} committed at {blockNumber}."); - } - - private void CommitAndPersistNode(Hash256? address, ref TreePath path, in NodeCommitInfo nodeCommitInfo, WriteFlags writeFlags, INodeStorage.WriteBatch? writeBatch) - { - Debug.Assert(!_pruningStrategy.PruningEnabled); - - if (!nodeCommitInfo.IsEmptyBlockMarker && !nodeCommitInfo.Node.IsBoundaryProofNode) - { - TrieNode node = nodeCommitInfo.Node; - - if (node.Keccak is null) - { - ThrowUnknownHash(node); - } - - PersistNode(address, path, node, writeBatch!, writeFlags); - - IncrementCommittedNodesCount(); - } - - [DoesNotReturn] - [StackTraceHidden] - static void ThrowUnknownHash(TrieNode node) => throw new TrieStoreException($"The hash of {node} should be known at the time of committing."); - } - - private int GetNodeShardIdx(in TreePath path, Hash256 hash) => - // When enabled, the shard have dictionaries for tracking past path hash also. - // So the same path need to be in the same shard for the remove logic to work. - // Using the address first byte however, causes very uneven distribution. So the path is used. - _livePruningEnabled ? path.Path.Bytes[0] : hash.Bytes[0]; - - private int GetNodeShardIdx(in TrieStoreDirtyNodesCache.Key key) => GetNodeShardIdx(key.Path, key.Keccak); - - private TrieStoreDirtyNodesCache GetDirtyNodeShard(in TrieStoreDirtyNodesCache.Key key) => _dirtyNodes[GetNodeShardIdx(key)]; - - private int DirtyNodesCount() - { - int count = 0; - foreach (TrieStoreDirtyNodesCache dirtyNode in _dirtyNodes) - { - count += dirtyNode.Count; - } - return count; - } - - private bool DirtyNodesTryGetValue(in TrieStoreDirtyNodesCache.Key key, out TrieNode node) => - GetDirtyNodeShard(key).TryGetValue(key, out node); - - private void DirtyNodesSaveInCache(in TrieStoreDirtyNodesCache.Key key, TrieNode node) => - GetDirtyNodeShard(key).SaveInCache(key, node); - - private bool DirtyNodesIsNodeCached(TrieStoreDirtyNodesCache.Key key) => - GetDirtyNodeShard(key).IsNodeCached(key); - - private TrieNode DirtyNodesFromCachedRlpOrUnknown(TrieStoreDirtyNodesCache.Key key) => - GetDirtyNodeShard(key).FromCachedRlpOrUnknown(key); - - private TrieNode DirtyNodesFindCachedOrUnknown(TrieStoreDirtyNodesCache.Key key) => - GetDirtyNodeShard(key).FindCachedOrUnknown(key); - - private TrieNode SaveOrReplaceInDirtyNodesCache(Hash256? 
address, ref TreePath path, NodeCommitInfo nodeCommitInfo, TrieNode node) - { - TrieStoreDirtyNodesCache.Key key = new(address, path, node.Keccak); - if (DirtyNodesTryGetValue(in key, out TrieNode cachedNodeCopy)) - { - Metrics.LoadedFromCacheNodesCount++; - if (!ReferenceEquals(cachedNodeCopy, node)) - { - if (_logger.IsTrace) Trace(node, cachedNodeCopy); - cachedNodeCopy.ResolveKey(GetTrieStore(address), ref path, nodeCommitInfo.IsRoot); - if (node.Keccak != cachedNodeCopy.Keccak) - { - ThrowNodeIsNotSame(node, cachedNodeCopy); - } - - if (!nodeCommitInfo.IsRoot) - { - nodeCommitInfo.NodeParent!.ReplaceChildRef(nodeCommitInfo.ChildPositionAtParent, cachedNodeCopy); - } - - node = cachedNodeCopy; - Metrics.ReplacedNodesCount++; - } - } - else - { - DirtyNodesSaveInCache(key, node); - } - - return node; - - [MethodImpl(MethodImplOptions.NoInlining)] - void Trace(TrieNode node, TrieNode cachedNodeCopy) - { - _logger.Trace($"Replacing {node} with its cached copy {cachedNodeCopy}."); - } - - [DoesNotReturn] - [StackTraceHidden] - static void ThrowNodeIsNotSame(TrieNode node, TrieNode cachedNodeCopy) => - throw new InvalidOperationException($"The hash of replacement node {cachedNodeCopy} is not the same as the original {node}."); - } - - public ICommitter BeginCommit(Hash256? address, TrieNode? root, WriteFlags writeFlags) - { - if (_pruningStrategy.PruningEnabled) - { - if (_currentBlockCommitter is null) throw new InvalidOperationException($"With pruning triestore, {nameof(BeginBlockCommit)} must be called."); - } - - return _currentBlockCommitter is not null - // Note, archive node still use this path. This is because it needs the block commit set to handle reorg announcement. - ? _currentBlockCommitter.GetTrieCommitter(address, root, writeFlags) - // This happens when there are no block involved, such as during snap sync or just calculating patricia root. - : new NonPruningTrieStoreCommitter(this, address, _nodeStorage.StartWriteBatch(), writeFlags); - } - - public IBlockCommitter BeginBlockCommit(long blockNumber) - { - if (_pruningStrategy.PruningEnabled) - { - if (_currentBlockCommitter is not null) - { - throw new InvalidOperationException("Cannot start a new block commit when an existing one is still not closed"); - } - - Monitor.Enter(_dirtyNodesLock); - } - - _currentBlockCommitter = new BlockCommitter(this, CreateCommitSet(blockNumber)); - return _currentBlockCommitter; - } - - private void FinishBlockCommit(BlockCommitSet set, TrieNode? root) - { - if (_logger.IsTrace) _logger.Trace($"Enqueued blocks {_commitSetQueue?.Count ?? 0}"); - set.Seal(root); - - bool shouldPersistSnapshot = _persistenceStrategy.ShouldPersist(set.BlockNumber); - if (shouldPersistSnapshot) - { - // For safety we prefer to commit half of the batch rather than not commit at all. - // Generally hanging nodes are not a problem in the DB but anything missing from the DB is. - using INodeStorage.WriteBatch currentBatch = _nodeStorage.StartWriteBatch(); - ParallelPersistBlockCommitSet(set); - } - - set.Prune(); - - _currentBlockCommitter = null; - - if (_pruningStrategy.PruningEnabled) - Monitor.Exit(_dirtyNodesLock); - - Prune(); - } - - - public event EventHandler? ReorgBoundaryReached; - - // Used in testing to not have to wait for condition. - public event EventHandler OnMemoryPruneCompleted; - - public byte[]? TryLoadRlp(Hash256? address, in TreePath path, Hash256 keccak, INodeStorage? nodeStorage, ReadFlags readFlags = ReadFlags.None) - { - nodeStorage ??= _nodeStorage; - byte[]? 
rlp = nodeStorage.Get(address, path, keccak, readFlags); - - if (rlp is not null) - { - Metrics.LoadedFromDbNodesCount++; - } - - return rlp; - } - - - public byte[] LoadRlp(Hash256? address, in TreePath path, Hash256 keccak, INodeStorage? nodeStorage, ReadFlags readFlags = ReadFlags.None) - { - byte[]? rlp = TryLoadRlp(address, path, keccak, nodeStorage, readFlags); - if (rlp is null) - { - ThrowMissingNode(keccak); - } - - return rlp; - - [DoesNotReturn] - [StackTraceHidden] - static void ThrowMissingNode(Hash256 keccak) - { - throw new TrieNodeException($"Node {keccak} is missing from the DB", keccak); - } - } - - public virtual byte[]? LoadRlp(Hash256? address, in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => LoadRlp(address, path, hash, null, flags); - public virtual byte[]? TryLoadRlp(Hash256? address, in TreePath path, Hash256 hash, ReadFlags flags = ReadFlags.None) => TryLoadRlp(address, path, hash, null, flags); - - public virtual bool IsPersisted(Hash256? address, in TreePath path, in ValueHash256 keccak) - { - byte[]? rlp = _nodeStorage.Get(address, path, keccak, ReadFlags.None); - - if (rlp is null) - { - return false; - } - - Metrics.LoadedFromDbNodesCount++; - - return true; - } - - public IReadOnlyTrieStore AsReadOnly(INodeStorage? store) => - new ReadOnlyTrieStore(this, store); - - public bool IsNodeCached(Hash256? address, in TreePath path, Hash256? hash) => DirtyNodesIsNodeCached(new TrieStoreDirtyNodesCache.Key(address, path, hash)); - - public TrieNode FindCachedOrUnknown(Hash256? address, in TreePath path, Hash256? hash) => - FindCachedOrUnknown(address, path, hash, false); - - internal virtual TrieNode FindCachedOrUnknown(Hash256? address, in TreePath path, Hash256? hash, bool isReadOnly) - { - ArgumentNullException.ThrowIfNull(hash); - - if (!_pruningStrategy.PruningEnabled) - { - return new TrieNode(NodeType.Unknown, hash); - } - - TrieStoreDirtyNodesCache.Key key = new TrieStoreDirtyNodesCache.Key(address, path, hash); - return FindCachedOrUnknown(key, isReadOnly); - } - - private TrieNode FindCachedOrUnknown(TrieStoreDirtyNodesCache.Key key, bool isReadOnly) - { - return isReadOnly ? DirtyNodesFromCachedRlpOrUnknown(key) : DirtyNodesFindCachedOrUnknown(key); - } - - // Used only in tests - public void Dump() - { - foreach (TrieStoreDirtyNodesCache? dirtyNode in _dirtyNodes) - { - dirtyNode.Dump(); - } - } - - public void Prune() - { - if (_pruningStrategy.ShouldPrune(MemoryUsedByDirtyCache) && _pruningTask.IsCompleted) - { - _pruningTask = Task.Run(() => - { - try - { - // Flush ahead of time so that memtable is empty which prevent stalling when writing nodes. - // Note, the WriteBufferSize * WriteBufferNumber need to be more than about 20% of pruning cache - // otherwise, it may not fit the whole dirty cache. - // Additionally, if (WriteBufferSize * (WriteBufferNumber - 1)) is already more than 20% of pruning - // cache, it is likely that there are enough space for it on most time, except for syncing maybe. - _nodeStorage.Flush(onlyWal: false); - lock (_dirtyNodesLock) - { - long start = Stopwatch.GetTimestamp(); - if (_logger.IsDebug) _logger.Debug($"Locked {nameof(TrieStore)} for pruning."); - - long memoryUsedByDirtyCache = MemoryUsedByDirtyCache; - if (!_pruningTaskCancellationTokenSource.IsCancellationRequested && - _pruningStrategy.ShouldPrune(memoryUsedByDirtyCache)) - { - // Most of the time in memory pruning is on `PrunePersistedRecursively`. So its - // usually faster to just SaveSnapshot causing most of the entry to be persisted. 
- // Not saving snapshot just save about 5% of memory at most of the time, causing - // an elevated pruning a few blocks after making it not very effective especially - // on constant block processing such as during forward sync where it can take up to - // 30% of the total time on halfpath as the block processing portion got faster. - // - // With halfpath's live pruning, there is a slight complication, the currently loaded - // persisted node have a pretty good hit rate and tend to conflict with the persisted - // nodes (address,path) entry on second PruneCache. So pruning them ahead of time - // really helps increase nodes that can be removed. - PruneCache(skipRecalculateMemory: true); - - SaveSnapshot(); - - PruneCache(); - - TimeSpan sw = Stopwatch.GetElapsedTime(start); - long ms = (long)sw.TotalMilliseconds; - Metrics.PruningTime = ms; - if (_logger.IsInfo) _logger.Info($"Executed memory prune. Took {ms:0.##} ms. From {memoryUsedByDirtyCache / 1.MiB()}MB to {MemoryUsedByDirtyCache / 1.MiB()}MB"); - } - } - - if (_logger.IsDebug) _logger.Debug($"Pruning finished. Unlocked {nameof(TrieStore)}."); - } - catch (Exception e) - { - if (_logger.IsError) _logger.Error("Pruning failed with exception.", e); - } - }); - - _pruningTask.ContinueWith((_) => - { - OnMemoryPruneCompleted?.Invoke(this, EventArgs.Empty); - }); - } - } - - private bool SaveSnapshot() - { - if (_pruningStrategy.ShouldPrune(MemoryUsedByDirtyCache)) - { - if (_logger.IsDebug) _logger.Debug("Elevated pruning starting"); - - int count = _commitSetQueue?.Count ?? 0; - if (count == 0) return false; - - using ArrayPoolList toAddBack = new(count); - using ArrayPoolList candidateSets = new(count); - while (_commitSetQueue.TryDequeue(out BlockCommitSet frontSet)) - { - if (frontSet!.BlockNumber >= LatestCommittedBlockNumber - _pruningStrategy.MaxDepth) - { - toAddBack.Add(frontSet); - } - else if (candidateSets.Count > 0 && candidateSets[0].BlockNumber == frontSet.BlockNumber) - { - candidateSets.Add(frontSet); - } - else if (candidateSets.Count == 0 || frontSet.BlockNumber > candidateSets[0].BlockNumber) - { - candidateSets.Clear(); - candidateSets.Add(frontSet); - } - } - - // TODO: Find a way to not have to re-add everything - for (int index = 0; index < toAddBack.Count; index++) - { - _commitSetQueue.Enqueue(toAddBack[index]); - } - - - bool shouldDeletePersistedNode = - // Its disabled - _livePruningEnabled && - // Full pruning need to visit all node, so can't delete anything. - !_persistenceStrategy.IsFullPruning && - // If more than one candidate set, its a reorg, we can't remove node as persisted node may not be canonical - candidateSets.Count == 1; - - Action? persistedNodeRecorder = shouldDeletePersistedNode ? _persistedNodeRecorder : null; - - for (int index = 0; index < candidateSets.Count; index++) - { - BlockCommitSet blockCommitSet = candidateSets[index]; - if (_logger.IsDebug) _logger.Debug($"Elevated pruning for candidate {blockCommitSet.BlockNumber}"); - ParallelPersistBlockCommitSet(blockCommitSet, persistedNodeRecorder); - } - - Task deleteTask = shouldDeletePersistedNode ? 
RemovePastKeys() : Task.CompletedTask; - - Task RemovePastKeys() - { - for (int index = 0; index < _dirtyNodes.Length; index++) - { - int i = index; - _dirtyNodesTasks[index] = Task.Run(() => - { - _dirtyNodes[i].RemovePastKeys(_persistedHashes[i], _nodeStorage); - _persistedHashes[i].NoResizeClear(); - }); - } - - return Task.WhenAll(_dirtyNodesTasks); - } - - AnnounceReorgBoundaries(); - deleteTask.Wait(); - - if (_livePruningEnabled) - { - foreach (TrieStoreDirtyNodesCache dirtyNode in _dirtyNodes) - { - dirtyNode.CleanObsoletePersistedLastSeen(); - } - } - - if (candidateSets.Count > 0) - { - return true; - } - - _commitSetQueue.TryPeek(out BlockCommitSet? uselessFrontSet); - if (_logger.IsDebug) _logger.Debug($"Found no candidate for elevated pruning (sets: {_commitSetQueue.Count}, earliest: {uselessFrontSet?.BlockNumber}, newest kept: {LatestCommittedBlockNumber}, reorg depth {_pruningStrategy.MaxDepth})"); - } - - return false; - } - - private void PersistedNodeRecorder(TreePath treePath, Hash256 address, TrieNode tn) - { - if (treePath.Length <= TinyTreePath.MaxNibbleLength) - { - int shardIdx = GetNodeShardIdx(treePath, tn.Keccak); - - HashAndTinyPath key = new(address, new TinyTreePath(treePath)); - - _persistedHashes[shardIdx].AddOrUpdate(key, _ => tn.Keccak, (_, _) => null); - } - } - - /// - /// This method is responsible for reviewing the nodes that are directly in the cache and - /// removing ones that are either no longer referenced or already persisted. - /// - /// - private void PruneCache(bool skipRecalculateMemory = false) - { - if (_logger.IsDebug) _logger.Debug($"Pruning nodes {MemoryUsedByDirtyCache / 1.MB()} MB , last persisted block: {LastPersistedBlockNumber} current: {LatestCommittedBlockNumber}."); - long start = Stopwatch.GetTimestamp(); - - long newMemory = 0; - - for (int index = 0; index < _dirtyNodes.Length; index++) - { - TrieStoreDirtyNodesCache dirtyNode = _dirtyNodes[index]; - _dirtyNodesTasks[index] = Task.Run(() => - { - long shardSize = dirtyNode.PruneCache(skipRecalculateMemory); - Interlocked.Add(ref newMemory, shardSize); - }); - } - - Task.WaitAll(_dirtyNodesTasks); - - if (!skipRecalculateMemory) MemoryUsedByDirtyCache = newMemory; - _ = CachedNodesCount; // Setter also update the count - - if (_logger.IsDebug) _logger.Debug($"Finished pruning nodes in {(long)Stopwatch.GetElapsedTime(start).TotalMilliseconds}ms {MemoryUsedByDirtyCache / 1.MB()} MB, last persisted block: {LastPersistedBlockNumber} current: {LatestCommittedBlockNumber}."); - } - - public void ClearCache() - { - foreach (TrieStoreDirtyNodesCache dirtyNode in _dirtyNodes) - { - dirtyNode.Clear(); - } - } - - public void Dispose() - { - if (_logger.IsDebug) _logger.Debug("Disposing trie"); - _pruningTaskCancellationTokenSource.Cancel(); - _pruningTask.Wait(); - PersistOnShutdown(); - } - - public void WaitForPruning() - { - _pruningTask.Wait(); - } - - protected readonly INodeStorage _nodeStorage; - - private readonly TrieKeyValueStore _publicStore; - - private readonly IPruningStrategy _pruningStrategy; - - private readonly IPersistenceStrategy _persistenceStrategy; - - private readonly ILogger _logger; - - private ConcurrentQueue _commitSetQueue; - - private ConcurrentQueue CommitSetQueue => - (_commitSetQueue ?? CreateQueueAtomic(ref _commitSetQueue)); - - private BlockCommitSet? 
_lastCommitSet = null; - - private long _memoryUsedByDirtyCache; - - private int _committedNodesCount; - - private int _persistedNodesCount; - - private long _latestPersistedBlockNumber; - - private BlockCommitter? _currentBlockCommitter = null; - - - private long LatestCommittedBlockNumber { get; set; } - public INodeStorage.KeyScheme Scheme => _nodeStorage.Scheme; - - [MethodImpl(MethodImplOptions.NoInlining)] - private static ConcurrentQueue CreateQueueAtomic(ref ConcurrentQueue val) - { - ConcurrentQueue instance = new(); - ConcurrentQueue? prior = Interlocked.CompareExchange(ref val, instance, null); - return prior ?? instance; - } - - protected virtual void VerifyNewCommitSet(long blockNumber) - { - if (_lastCommitSet is not null) - { - Debug.Assert(_lastCommitSet.IsSealed, "Not sealed when beginning new block"); - - if (_lastCommitSet.BlockNumber != blockNumber - 1 && blockNumber != 0 && _lastCommitSet.BlockNumber != 0) - { - if (_logger.IsInfo) _logger.Info($"Non consecutive block commit. This is likely a reorg. Last block commit: {_lastCommitSet.BlockNumber}. New block commit: {blockNumber}."); - } - } - } - - private BlockCommitSet CreateCommitSet(long blockNumber) - { - if (_logger.IsDebug) _logger.Debug($"Beginning new {nameof(BlockCommitSet)} - {blockNumber}"); - - VerifyNewCommitSet(blockNumber); - - BlockCommitSet commitSet = new(blockNumber); - CommitSetQueue.Enqueue(commitSet); - - _lastCommitSet = commitSet; - - LatestCommittedBlockNumber = Math.Max(blockNumber, LatestCommittedBlockNumber); - // Why are we announcing **before** committing next block?? - // Should it be after commit? - AnnounceReorgBoundaries(); - DequeueOldCommitSets(); - - return commitSet; - } - - /// - /// Persists all transient (not yet persisted) starting from root. - /// Already persisted nodes are skipped. After this action we are sure that the full state is available - /// for the block represented by this commit set. - /// - /// A commit set of a block which root is to be persisted. - /// Special action to be called on each persist. Used to track which node to remove. - /// - private void ParallelPersistBlockCommitSet( - BlockCommitSet commitSet, - Action? persistedNodeRecorder = null, - WriteFlags writeFlags = WriteFlags.None - ) - { - INodeStorage.WriteBatch topLevelWriteBatch = _nodeStorage.StartWriteBatch(); - const int parallelBoundaryPathLength = 2; - - using ArrayPoolList<(TrieNode trieNode, Hash256? address2, TreePath path)> parallelStartNodes = new(ShardedDirtyNodeCount); - - void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path) - { - if (path.Length < parallelBoundaryPathLength) - { - persistedNodeRecorder?.Invoke(path, address2, tn); - PersistNode(address2, path, tn, topLevelWriteBatch, writeFlags); - } - else - { - parallelStartNodes.Add((tn, address2, path)); - } - } - - if (_pruningStrategy.PruningEnabled) - { - if (_logger.IsInfo) _logger.Info($"Persisting from root {commitSet.Root?.Keccak} in block {commitSet.BlockNumber}"); - } - else - { - if (_logger.IsDebug) _logger.Debug($"Persisting from root {commitSet.Root?.Keccak} in block {commitSet.BlockNumber}"); - } - - long start = Stopwatch.GetTimestamp(); - - // The first CallRecursive stop at two level, yielding 256 node in parallelStartNodes, which is run concurrently - TreePath path = TreePath.Empty; - commitSet.Root?.CallRecursively(TopLevelPersist, null, ref path, GetTrieStore(null), true, _logger, maxPathLength: parallelBoundaryPathLength); - - // The amount of change in the subtrees are not balanced at all. 
So their writes ares buffered here - // which get disposed in parallel instead of being disposed in `PersistNodeStartingFrom`. - // This unfortunately is not atomic - // However, anything that we are trying to persist here should still be in dirty cache. - // So parallel read should go there first instead of to the database for these dataset, - // so it should be fine for these to be non atomic. - using BlockingCollection disposeQueue = new BlockingCollection(4); - - for (int index = 0; index < _disposeTasks.Length; index++) - { - _disposeTasks[index] = Task.Run(() => - { - while (disposeQueue.TryTake(out INodeStorage.WriteBatch disposable, Timeout.Infinite)) - { - disposable.Dispose(); - } - }); - } - - Task.WaitAll(parallelStartNodes.Select(entry => Task.Run(() => - { - (TrieNode trieNode, Hash256? address2, TreePath path2) = entry; - PersistNodeStartingFrom(trieNode, address2, path2, persistedNodeRecorder, writeFlags, disposeQueue); - }))); - - disposeQueue.CompleteAdding(); - Task.WaitAll(_disposeTasks); - - // Dispose top level last in case something goes wrong, at least the root won't be stored - topLevelWriteBatch.Dispose(); - _nodeStorage.Flush(onlyWal: true); - - long elapsedMilliseconds = (long)Stopwatch.GetElapsedTime(start).TotalMilliseconds; - Metrics.SnapshotPersistenceTime = elapsedMilliseconds; - - if (_logger.IsDebug) _logger.Debug($"Persisted trie from {commitSet.Root} at {commitSet.BlockNumber} in {elapsedMilliseconds}ms (cache memory {MemoryUsedByDirtyCache})"); - - LastPersistedBlockNumber = commitSet.BlockNumber; - } - - private void PersistNodeStartingFrom(TrieNode tn, Hash256 address2, TreePath path, - Action? persistedNodeRecorder, - WriteFlags writeFlags, BlockingCollection disposeQueue) - { - long persistedNodeCount = 0; - INodeStorage.WriteBatch writeBatch = _nodeStorage.StartWriteBatch(); - - void DoPersist(TrieNode node, Hash256? address3, TreePath path2) - { - persistedNodeRecorder?.Invoke(path2, address3, node); - PersistNode(address3, path2, node, writeBatch, writeFlags); - - persistedNodeCount++; - if (persistedNodeCount % 512 == 0) - { - disposeQueue.Add(writeBatch); - writeBatch = _nodeStorage.StartWriteBatch(); - } - } - - tn.CallRecursively(DoPersist, address2, ref path, GetTrieStore(address2), true, _logger); - disposeQueue.Add(writeBatch); - } - - private void PersistNode(Hash256? address, in TreePath path, TrieNode currentNode, INodeStorage.WriteBatch writeBatch, WriteFlags writeFlags = WriteFlags.None) - { - ArgumentNullException.ThrowIfNull(currentNode); - - if (currentNode.Keccak is not null) - { - // Note that the LastSeen value here can be 'in the future' (greater than block number - // if we replaced a newly added node with an older copy and updated the LastSeen value. - // Here we reach it from the old root so it appears to be out of place but it is correct as we need - // to prevent it from being removed from cache and also want to have it persisted. 
- - if (_logger.IsTrace) _logger.Trace($"Persisting {nameof(TrieNode)} {currentNode}."); - writeBatch.Set(address, path, currentNode.Keccak, currentNode.FullRlp, writeFlags); - currentNode.IsPersisted = true; - IncrementPersistedNodesCount(); - } - else - { - Debug.Assert(currentNode.FullRlp.IsNotNull && currentNode.FullRlp.Length < 32, - "We only expect persistence call without Keccak for the nodes that are kept inside the parent RLP (less than 32 bytes)."); - } - } - - public bool IsNoLongerNeeded(TrieNode node) - { - return IsNoLongerNeeded(node.LastSeen); - } - - public bool IsNoLongerNeeded(long lastSeen) - { - Debug.Assert(lastSeen >= 0, $"Any node that is cache should have {nameof(TrieNode.LastSeen)} set."); - return lastSeen < LastPersistedBlockNumber - && lastSeen < LatestCommittedBlockNumber - _pruningStrategy.MaxDepth; - } - - private void DequeueOldCommitSets() - { - if (_commitSetQueue?.IsEmpty ?? true) return; - - while (_commitSetQueue.TryPeek(out BlockCommitSet blockCommitSet)) - { - if (blockCommitSet.BlockNumber < LatestCommittedBlockNumber - _pruningStrategy.MaxDepth - 1) - { - if (_logger.IsDebug) _logger.Debug($"Removing historical ({_commitSetQueue.Count}) {blockCommitSet.BlockNumber} < {LatestCommittedBlockNumber} - {_pruningStrategy.MaxDepth}"); - _commitSetQueue.TryDequeue(out _); - } - else - { - break; - } - } - } - - private void AnnounceReorgBoundaries() - { - if (LatestCommittedBlockNumber < 1) - { - return; - } - - bool shouldAnnounceReorgBoundary = !_pruningStrategy.PruningEnabled; - bool isFirstCommit = Interlocked.Exchange(ref _isFirst, 1) == 0; - if (isFirstCommit) - { - if (_logger.IsDebug) _logger.Debug($"Reached first commit - newest {LatestCommittedBlockNumber}, last persisted {LastPersistedBlockNumber}"); - // this is important when transitioning from fast sync - // imagine that we transition at block 1200000 - // and then we close the app at 1200010 - // in such case we would try to continue at Head - 1200010 - // because head is loaded if there is no persistence checkpoint - // so we need to force the persistence checkpoint - long baseBlock = Math.Max(0, LatestCommittedBlockNumber - 1); - LastPersistedBlockNumber = baseBlock; - shouldAnnounceReorgBoundary = true; - } - else if (!_lastPersistedReachedReorgBoundary) - { - // even after we persist a block we do not really remember it as a safe checkpoint - // until max reorgs blocks after - if (LatestCommittedBlockNumber >= LastPersistedBlockNumber + _pruningStrategy.MaxDepth) - { - shouldAnnounceReorgBoundary = true; - } - } - - if (shouldAnnounceReorgBoundary) - { - ReorgBoundaryReached?.Invoke(this, new ReorgBoundaryReached(LastPersistedBlockNumber)); - _lastPersistedReachedReorgBoundary = true; - } - } - - private void PersistOnShutdown() - { - // If we are in archive mode, we don't need to change reorg boundaries. - if (_pruningStrategy.PruningEnabled) - { - if (_commitSetQueue?.IsEmpty ?? 
true) return; - // here we try to shorten the number of blocks recalculated when restarting (so we force persist) - // and we need to speed up the standard announcement procedure so we persists a block - - using ArrayPoolList candidateSets = new(_commitSetQueue.Count); - while (_commitSetQueue.TryDequeue(out BlockCommitSet frontSet)) - { - if (!frontSet.IsSealed || candidateSets.Count == 0 || candidateSets[0].BlockNumber == frontSet!.BlockNumber) - { - candidateSets.Add(frontSet); - } - else if (frontSet!.BlockNumber < LatestCommittedBlockNumber - _pruningStrategy.MaxDepth - && frontSet!.BlockNumber > candidateSets[0].BlockNumber) - { - candidateSets.Clear(); - candidateSets.Add(frontSet); - } - } - - INodeStorage.WriteBatch writeBatch = _nodeStorage.StartWriteBatch(); - for (int index = 0; index < candidateSets.Count; index++) - { - BlockCommitSet blockCommitSet = candidateSets[index]; - if (_logger.IsDebug) _logger.Debug($"Persisting on disposal {blockCommitSet} (cache memory at {MemoryUsedByDirtyCache})"); - ParallelPersistBlockCommitSet(blockCommitSet); - } - writeBatch.Dispose(); - - if (candidateSets.Count == 0) - { - if (_logger.IsDebug) _logger.Debug("No commitset to persist at all."); - } - else - { - AnnounceReorgBoundaries(); - } - } - } - - public void PersistCache(CancellationToken cancellationToken) - { - if (_logger.IsInfo) _logger.Info("Full Pruning Persist Cache started."); - - lock (_dirtyNodesLock) - { - int commitSetCount = 0; - long start = Stopwatch.GetTimestamp(); - // We persist all sealed Commitset causing PruneCache to almost completely clear the cache. Any new block that - // need existing node will have to read back from db causing copy-on-read mechanism to copy the node. - void ClearCommitSetQueue() - { - while (_commitSetQueue.TryPeek(out BlockCommitSet commitSet) && commitSet.IsSealed) - { - if (!_commitSetQueue.TryDequeue(out commitSet)) break; - if (!commitSet.IsSealed) - { - // Oops - _commitSetQueue.Enqueue(commitSet); - break; - } - - commitSetCount++; - ParallelPersistBlockCommitSet(commitSet); - } - } - - if (!(_commitSetQueue?.IsEmpty ?? true)) - { - // We persist outside of lock first. - ClearCommitSetQueue(); - } - - if (_logger.IsInfo) _logger.Info($"Saving all commit set took {Stopwatch.GetElapsedTime(start)} for {commitSetCount} commit sets."); - - start = Stopwatch.GetTimestamp(); - - // Double check - ClearCommitSetQueue(); - if (cancellationToken.IsCancellationRequested) return; - - // This should clear most nodes. For some reason, not all. - PruneCache(skipRecalculateMemory: true); - if (cancellationToken.IsCancellationRequested) return; - - for (int index = 0; index < _dirtyNodes.Length; index++) - { - TrieStoreDirtyNodesCache dirtyNode = _dirtyNodes[index]; - _dirtyNodesTasks[index] = Task.Run(() => - { - dirtyNode.PersistAll(_nodeStorage, cancellationToken); - }); - } - - Task.WaitAll(_dirtyNodesTasks); - - if (cancellationToken.IsCancellationRequested) return; - - PruneCache(); - - int dirtyNodesCount = DirtyNodesCount(); - if (dirtyNodesCount != 0) - { - if (_logger.IsWarn) _logger.Warn($"{dirtyNodesCount} cache entry remains."); - } - - foreach (TrieStoreDirtyNodesCache dirtyNode in _dirtyNodes) - { - dirtyNode.ClearLivePruningTracking(); - } - - if (_logger.IsInfo) _logger.Info($"Clear cache took {Stopwatch.GetElapsedTime(start)}."); - } - } - - // Used to serve node by hash - private byte[]? 
-    private byte[]? GetByHash(ReadOnlySpan<byte> key, ReadFlags flags = ReadFlags.None)
-    {
-        Hash256 asHash = new Hash256(key);
-        return _pruningStrategy.PruningEnabled
-               && DirtyNodesTryGetValue(new TrieStoreDirtyNodesCache.Key(null, TreePath.Empty, asHash), out TrieNode? trieNode)
-               && trieNode is not null
-               && trieNode.NodeType != NodeType.Unknown
-               && trieNode.FullRlp.IsNotNull
-            ? trieNode.FullRlp.ToArray()
-            : _nodeStorage.Get(null, TreePath.Empty, asHash, flags);
-    }
-
-    public IReadOnlyKeyValueStore TrieNodeRlpStore => _publicStore;
-    public bool IsCurrentlyFullPruning => _persistenceStrategy.IsFullPruning;
-
-    public void Set(Hash256? address, in TreePath path, in ValueHash256 keccak, byte[] rlp)
-    {
-        _nodeStorage.Set(address, path, keccak, rlp);
-    }
-
-    private class TrieKeyValueStore : IReadOnlyKeyValueStore
-    {
-        private readonly TrieStore _trieStore;
-
-        public TrieKeyValueStore(TrieStore trieStore)
-        {
-            _trieStore = trieStore;
-        }
-
-        public byte[]? Get(ReadOnlySpan<byte> key, ReadFlags flags = ReadFlags.None) => _trieStore.GetByHash(key, flags);
-    }
-
-    public bool HasRoot(Hash256 stateRoot)
-    {
-        if (stateRoot == Keccak.EmptyTreeHash) return true;
-        TrieNode node = FindCachedOrUnknown(null, TreePath.Empty, stateRoot, true);
-        if (node.NodeType == NodeType.Unknown)
-        {
-            return TryLoadRlp(null, TreePath.Empty, node.Keccak, ReadFlags.None) is not null;
-        }
-
-        return true;
-    }
-
-    private class BlockCommitter(
-        TrieStore trieStore,
-        BlockCommitSet commitSet
-    ) : IBlockCommitter
-    {
-        internal TrieNode? StateRoot = null;
-        private int _concurrency = trieStore._pruningStrategy.PruningEnabled ? Environment.ProcessorCount : 0;
-
-        public void Dispose()
-        {
-            trieStore.FinishBlockCommit(commitSet, StateRoot);
-        }
-
-        public ICommitter GetTrieCommitter(Hash256? address, TrieNode? root, WriteFlags writeFlags)
-        {
-            if (address is null) StateRoot = root;
-            return trieStore._pruningStrategy.PruningEnabled
-                ? new PruningTrieStoreCommitter(this, trieStore, commitSet.BlockNumber, address, root)
-                : new NonPruningTrieStoreCommitter(trieStore, address, trieStore._nodeStorage.StartWriteBatch(), writeFlags);
-        }
-
-        public bool TryRequestConcurrencyQuota()
-        {
-            if (Interlocked.Decrement(ref _concurrency) >= 0)
-            {
-                return true;
-            }
-
-            ReturnConcurrencyQuota();
-            return false;
-        }
-
-        public void ReturnConcurrencyQuota() => Interlocked.Increment(ref _concurrency);
-    }
-
-    private class PruningTrieStoreCommitter(
-        BlockCommitter blockCommitter,
-        TrieStore trieStore,
-        long blockNumber,
-        Hash256? address,
-        TrieNode root
-    ) : ICommitter
-    {
-        private readonly bool _needToResetRoot = root is not null && root.IsDirty;
-
-        public void Dispose()
-        {
-            // During commit in PatriciaTrie, the root may get resolved to an existing node (same keccak).
-            // This ensures that the root we use here is the same node.
-            // This is only needed for the state tree, as the root needs to be put in the block commit set.
-            if (_needToResetRoot)
-            {
-                if (address == null) blockCommitter.StateRoot = trieStore.FindCachedOrUnknown(address, TreePath.Empty, root?.Keccak);
-            }
-        }
-
-        public void CommitNode(ref TreePath path, NodeCommitInfo nodeCommitInfo) =>
-            trieStore.CommitAndInsertToDirtyNodes(blockNumber, address, ref path, nodeCommitInfo);
-
-        public bool TryRequestConcurrentQuota() => blockCommitter.TryRequestConcurrencyQuota();
-
-        public void ReturnConcurrencyQuota() => blockCommitter.ReturnConcurrencyQuota();
-    }
-
-    private class NonPruningTrieStoreCommitter(
-        TrieStore trieStore,
-        Hash256? address,
-        INodeStorage.WriteBatch writeBatch,
-        WriteFlags writeFlags = WriteFlags.None
-    ) : ICommitter
-    {
-        public void Dispose()
-        {
-<<<<<<< HEAD
-            INodeStorage.WriteBatch topLevelWriteBatch = _nodeStorage.StartWriteBatch();
-            const int parallelBoundaryPathLength = 2;
-
-            using ArrayPoolList<(TrieNode trieNode, Hash256? address2, TreePath path)> parallelStartNodes = new(ShardedDirtyNodeCount);
-
-            void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path)
-            {
-                if (path.Length < parallelBoundaryPathLength)
-                {
-                    persistedNodeRecorder?.Invoke(path, address2, tn);
-                    PersistNode(address2, path, tn, topLevelWriteBatch, writeFlags);
-                }
-                else
-                {
-                    parallelStartNodes.Add((tn, address2, path));
-                }
-            }
-
-            if (_logger.IsDebug) _logger.Debug($"Persisting from root {commitSet.Root} in {commitSet.BlockNumber}");
-
-            long start = Stopwatch.GetTimestamp();
-
-            // The first CallRecursively stops at two levels, yielding 256 nodes in parallelStartNodes, which are run concurrently
-            TreePath path = TreePath.Empty;
-            commitSet.Root?.CallRecursively(TopLevelPersist, null, ref path, GetTrieStore(null), true, _logger, maxPathLength: parallelBoundaryPathLength);
-
-            // The amount of change in the subtrees is not balanced at all, so their writes are buffered here
-            // and disposed in parallel instead of being disposed in `PersistNodeStartingFrom`.
-            // This unfortunately is not atomic.
-            // However, anything that we are trying to persist here should still be in the dirty cache,
-            // so parallel reads should go there first instead of to the database for these datasets,
-            // and it should be fine for these to be non-atomic.
-            using BlockingCollection<INodeStorage.WriteBatch> disposeQueue = new BlockingCollection<INodeStorage.WriteBatch>(4);
-
-            for (int index = 0; index < _disposeTasks.Length; index++)
-            {
-                _disposeTasks[index] = Task.Run(() =>
-                {
-                    while (disposeQueue.TryTake(out INodeStorage.WriteBatch disposable, Timeout.Infinite))
-                    {
-                        disposable.Dispose();
-                    }
-                });
-            }
-
-            using ArrayPoolList<Task> tasks = parallelStartNodes.Select(entry => Task.Run(() =>
-            {
-                (TrieNode trieNode, Hash256? address2, TreePath path2) = entry;
-                PersistNodeStartingFrom(trieNode, address2, path2, persistedNodeRecorder, writeFlags, disposeQueue);
-            })).ToPooledList(parallelStartNodes.Count);
-
-            Task.WaitAll(tasks.AsSpan());
-
-            disposeQueue.CompleteAdding();
-            Task.WaitAll(_disposeTasks);
-
-            // Dispose top level last in case something goes wrong; at least the root won't be stored
-            topLevelWriteBatch.Dispose();
-
-            long elapsedMilliseconds = (long)Stopwatch.GetElapsedTime(start).TotalMilliseconds;
-            Metrics.SnapshotPersistenceTime = elapsedMilliseconds;
-
-            if (_logger.IsDebug) _logger.Debug($"Persisted trie from {commitSet.Root} at {commitSet.BlockNumber} in {elapsedMilliseconds}ms (cache memory {MemoryUsedByDirtyCache})");
-
-            LastPersistedBlockNumber = commitSet.BlockNumber;
-=======
-            writeBatch.Dispose();
->>>>>>> origin/master
-        }
-
-        public void CommitNode(ref TreePath path, NodeCommitInfo nodeCommitInfo)
-        {
-            trieStore.CommitAndPersistNode(address, ref path, nodeCommitInfo, writeFlags: writeFlags, writeBatch: writeBatch);
-        }
-
-        public bool TryRequestConcurrentQuota() => false;
-
-        public void ReturnConcurrencyQuota() { }
-    }
-
-    internal static class HashHelpers
-    {
-        private const int HashPrime = 101;
-
-        private static bool IsPrime(int candidate)
-        {
-            if ((candidate & 1) != 0)
-            {
-                int limit = (int)Math.Sqrt(candidate);
-                for (int divisor = 3; divisor <= limit; divisor += 2)
-                {
-                    if ((candidate % divisor) == 0)
-                        return false;
-                }
-                return true;
-            }
-            return candidate == 2;
-        }
-
-        public static int GetPrime(int min)
-        {
-            foreach (int prime in Primes)
-            {
-                if (prime >= min)
-                    return prime;
-            }
-
-            // Outside of our predefined table. Compute the hard way.
-            for (int i = (min | 1); i < int.MaxValue; i += 2)
-            {
-                if (IsPrime(i) && ((i - 1) % HashPrime != 0))
-                    return i;
-            }
-            return min;
-        }
-
-        // Table of prime numbers to use as hash table sizes.
-        // A typical resize algorithm would pick the smallest prime number in this array
-        // that is larger than twice the previous capacity.
-        // Suppose our Hashtable currently has capacity x and enough elements are added
-        // such that a resize needs to occur. Resizing first computes 2x then finds the
-        // first prime in the table greater than 2x, i.e. if primes are ordered
-        // p_1, p_2, ..., p_i, ..., it finds p_n such that p_n-1 < 2x < p_n.
-        // Doubling is important for preserving the asymptotic complexity of the
-        // hashtable operations such as add. Having a prime guarantees that double
-        // hashing does not lead to infinite loops. IE, your hash function will be
-        // h1(key) + i*h2(key), 0 <= i < size. h2 and the size must be relatively prime.
-        // We prefer the low computation costs of higher prime numbers over the increased
-        // memory allocation of a fixed prime number i.e. when right sizing a HashSet.
-        private static ReadOnlySpan<int> Primes =>
-        [
-            3,
-            7,
-            11,
-            17,
-            23,
-            29,
-            37,
-            47,
-            59,
-            71,
-            89,
-            107,
-            131,
-            163,
-            197,
-            239,
-            293,
-            353,
-            431,
-            521,
-            631,
-            761,
-            919,
-            1103,
-            1327,
-            1597,
-            1931,
-            2333,
-            2801,
-            3371,
-            4049,
-            4861,
-            5839,
-            7013,
-            8419,
-            10103,
-            12143,
-            14591,
-            17519,
-            21023,
-            25229,
-            30293,
-            36353,
-            43627,
-            52361,
-            62851,
-            75431,
-            90523,
-            108631,
-            130363,
-            156437,
-            187751,
-            225307,
-            270371,
-            324449,
-            389357,
-            467237,
-            560689,
-            672827,
-            807403,
-            968897,
-            1162687,
-            1395263,
-            1674319,
-            2009191,
-            2411033,
-            2893249,
-            3471899,
-            4166287,
-            4999559,
-            5999471,
-            7199369
-        ];
-    }
-}

From 8ff1fd7aab23489215f6f40cc947d07c52a035f5 Mon Sep 17 00:00:00 2001
From: "lukasz.rozmej"
Date: Wed, 20 Nov 2024 11:42:30 +0100
Subject: [PATCH 14/14] one more pooled list from enumerable

---
 src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs
index 33c5da9e863..cdccc13d808 100644
--- a/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs
+++ b/src/Nethermind/Nethermind.Trie/Pruning/TrieStore.cs
@@ -3,6 +3,7 @@
 
 using System;
 using System.Collections.Concurrent;
+using System.Collections.Generic;
 using System.Diagnostics;
 using System.Diagnostics.CodeAnalysis;
 using System.Linq;
@@ -802,11 +803,11 @@ void TopLevelPersist(TrieNode tn, Hash256? address2, TreePath path)
             });
         }
 
-        Task.WaitAll(parallelStartNodes.Select(entry => Task.Run(() =>
-        {
-            (TrieNode trieNode, Hash256? address2, TreePath path2) = entry;
-            PersistNodeStartingFrom(trieNode, address2, path2, persistedNodeRecorder, writeFlags, disposeQueue);
-        })));
+        using ArrayPoolList<Task> persistNodeStartingFromTasks = parallelStartNodes.Select(
+            entry => Task.Run(() => PersistNodeStartingFrom(entry.trieNode, entry.address2, entry.path, persistedNodeRecorder, writeFlags, disposeQueue)))
+            .ToPooledList(parallelStartNodes.Count);
+
+        Task.WaitAll(persistNodeStartingFromTasks.AsSpan());
 
         disposeQueue.CompleteAdding();
         Task.WaitAll(_disposeTasks);
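For context on PATCH 14/14: Task.WaitAll(IEnumerable<Task>) must materialize the lazy LINQ query into a fresh array internally on every call, whereas collecting the tasks into a pooled list once avoids that intermediate allocation and lets the Task.WaitAll(ReadOnlySpan<Task>) overload added in .NET 9 (which this patch series targets) wait on the buffer directly. The following is a minimal, self-contained sketch of the same pattern built on the BCL ArrayPool<T> rather than Nethermind's ArrayPoolList<T> (which layers list semantics and IDisposable on top of pooling); all names in it are illustrative, not part of the codebase above.

// Sketch only: pooled task storage + span-based WaitAll, assuming .NET 9.
using System;
using System.Buffers;
using System.Threading.Tasks;

class PooledTaskWaitSketch
{
    static void Main()
    {
        const int count = 8;
        // Rent a buffer instead of allocating a new Task[] per call;
        // the pool may hand back a larger array than requested.
        Task[] tasks = ArrayPool<Task>.Shared.Rent(count);
        try
        {
            for (int i = 0; i < count; i++)
            {
                int id = i; // capture a stable copy for the lambda
                tasks[i] = Task.Run(() => Console.WriteLine($"work item {id}"));
            }

            // .NET 9 adds Task.WaitAll(params ReadOnlySpan<Task>), so the rented
            // buffer can be waited on without copying; slice to the used length.
            Task.WaitAll(tasks.AsSpan(0, count));
        }
        finally
        {
            // Clear the slots so the pool does not keep the tasks alive.
            ArrayPool<Task>.Shared.Return(tasks, clearArray: true);
        }
    }
}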