diff --git a/op-chain-ops/cmd/check-derivation/main.go b/op-chain-ops/cmd/check-derivation/main.go index 499b128f87..dd5d631942 100644 --- a/op-chain-ops/cmd/check-derivation/main.go +++ b/op-chain-ops/cmd/check-derivation/main.go @@ -114,7 +114,7 @@ func newClientsFromContext(cliCtx *cli.Context) (*ethclient.Client, *sources.Eth MethodResetDuration: time.Minute, } cl := ethclient.NewClient(clients.L2RpcClient) - ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg) + ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(clients.L2RpcClient), log.Root(), nil, &ethClCfg, false) if err != nil { return nil, nil, err } diff --git a/op-e2e/setup.go b/op-e2e/setup.go index 0be50744c2..f0ebe03f1a 100644 --- a/op-e2e/setup.go +++ b/op-e2e/setup.go @@ -923,6 +923,7 @@ func configureL1(rollupNodeCfg *rollupNode.Config, l1Node EthInstance) { BatchSize: 20, HttpPollInterval: time.Millisecond * 100, MaxConcurrency: 10, + CacheSize: 1000, } } diff --git a/op-node/flags/flags.go b/op-node/flags/flags.go index a6037d1ce9..742e4f9efc 100644 --- a/op-node/flags/flags.go +++ b/op-node/flags/flags.go @@ -180,6 +180,13 @@ var ( Value: 20, Category: L1RPCCategory, } + L1RPCMaxCacheSize = &cli.IntFlag{ + Name: "l1.rpc-max-cache-size", + Usage: "The maximum cache size of the L1 client. It should be greater than or equal to the maximum height difference between the L1 blocks corresponding to the unsafe block height and the safe block height. Must be greater than or equal to 1", + EnvVars: prefixEnvVars("L1_RPC_MAX_CACHE_SIZE"), + Value: 1000, + Category: L1RPCCategory, + } L1HTTPPollInterval = &cli.DurationFlag{ Name: "l1.http-poll-interval", Usage: "Polling interval for latest-block subscription when using an HTTP RPC provider. 
Ignored for other types of RPC endpoints.", @@ -417,6 +424,7 @@ var optionalFlags = []cli.Flag{ L1RPCProviderKind, L1RPCRateLimit, L1RPCMaxBatchSize, + L1RPCMaxCacheSize, L1RPCMaxConcurrency, L1HTTPPollInterval, L1ArchiveBlobRpcAddr, diff --git a/op-node/node/client.go b/op-node/node/client.go index 3f2bf80b05..6873bb5cc4 100644 --- a/op-node/node/client.go +++ b/op-node/node/client.go @@ -117,6 +117,10 @@ type L1EndpointConfig struct { // BatchSize specifies the maximum batch-size, which also applies as L1 rate-limit burst amount (if set). BatchSize int + // CacheSize specifies the maximum cache size of the L1 client. + // It should be greater than or equal to the maximum height difference between the L1 blocks corresponding to the unsafe block height and the safe block height. + CacheSize int + // MaxConcurrency specifies the maximum number of concurrent requests to the L1 RPC. MaxConcurrency int @@ -133,6 +137,9 @@ func (cfg *L1EndpointConfig) Check() error { if cfg.BatchSize < 1 || cfg.BatchSize > 500 { return fmt.Errorf("batch size is invalid or unreasonable: %d", cfg.BatchSize) } + if cfg.CacheSize < 1 { + return fmt.Errorf("cache size is invalid or unreasonable: %d", cfg.CacheSize) + } if cfg.RateLimit < 0 { return fmt.Errorf("rate limit cannot be negative") } @@ -163,6 +170,10 @@ func (cfg *L1EndpointConfig) Setup(ctx context.Context, log log.Logger, rollupCf rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind) rpcCfg.MaxRequestsPerBatch = cfg.BatchSize rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency + rpcCfg.ReceiptsCacheSize = cfg.CacheSize + rpcCfg.HeadersCacheSize = cfg.CacheSize + rpcCfg.TransactionsCacheSize = cfg.CacheSize + rpcCfg.PayloadsCacheSize = cfg.CacheSize return l1Node, rpcCfg, nil } @@ -177,6 +188,10 @@ func fallbackClientWrap(ctx context.Context, logger log.Logger, urlList []string rpcCfg := sources.L1ClientDefaultConfig(rollupCfg, cfg.L1TrustRPC, cfg.L1RPCKind) rpcCfg.MaxRequestsPerBatch = cfg.BatchSize 
rpcCfg.MaxConcurrentRequests = cfg.MaxConcurrency + rpcCfg.ReceiptsCacheSize = cfg.CacheSize + rpcCfg.HeadersCacheSize = cfg.CacheSize + rpcCfg.TransactionsCacheSize = cfg.CacheSize + rpcCfg.PayloadsCacheSize = cfg.CacheSize return l1Node, rpcCfg, nil } diff --git a/op-node/service.go b/op-node/service.go index 8602baa7e5..7d850ed91d 100644 --- a/op-node/service.go +++ b/op-node/service.go @@ -158,6 +158,7 @@ func NewL1EndpointConfig(ctx *cli.Context) *node.L1EndpointConfig { L1RPCKind: sources.RPCProviderKind(strings.ToLower(ctx.String(flags.L1RPCProviderKind.Name))), RateLimit: ctx.Float64(flags.L1RPCRateLimit.Name), BatchSize: ctx.Int(flags.L1RPCMaxBatchSize.Name), + CacheSize: ctx.Int(flags.L1RPCMaxCacheSize.Name), HttpPollInterval: ctx.Duration(flags.L1HTTPPollInterval.Name), MaxConcurrency: ctx.Int(flags.L1RPCMaxConcurrency.Name), } diff --git a/op-service/sources/l1_client.go b/op-service/sources/l1_client.go index a347a17272..e2359a843a 100644 --- a/op-service/sources/l1_client.go +++ b/op-service/sources/l1_client.go @@ -206,7 +206,10 @@ func (s *L1Client) GoOrUpdatePreFetchReceipts(ctx context.Context, l1Start uint6 continue } if !isSuccess { - s.log.Debug("pre fetch receipts fail without error,need retry", "blockHash", blockInfo.Hash, "blockNumber", blockNumber) + s.log.Debug("The receipts cache may be full. "+ "Please ensure the maximum height difference between the L1 blocks "+ "corresponding to the unsafe block height and the safe block height is less than or equal to the cache size.", "blockHash", blockInfo.Hash, "blockNumber", blockNumber) time.Sleep(1 * time.Second) continue }