diff --git a/ecosystem/cache/cache_test.go b/ecosystem/cache/cache_test.go index 3f6e763a4e..1f2cfbbbbc 100644 --- a/ecosystem/cache/cache_test.go +++ b/ecosystem/cache/cache_test.go @@ -31,7 +31,16 @@ const ( func initTest() (context.Context, *cache.RelayerCacheServer) { ctx := context.Background() cs := cache.CacheServer{CacheMaxCost: 2 * 1024 * 1024 * 1024} - cs.InitCache(ctx, cache.DefaultExpirationTimeFinalized, cache.DefaultExpirationForNonFinalized, cache.DisabledFlagOption, cache.DefaultExpirationTimeFinalizedMultiplier, cache.DefaultExpirationTimeNonFinalizedMultiplier) + cs.InitCache( + ctx, + cache.DefaultExpirationTimeFinalized, + cache.DefaultExpirationForNonFinalized, + cache.DefaultExpirationNodeErrors, + cache.DefaultExpirationBlocksHashesToHeights, + cache.DisabledFlagOption, + cache.DefaultExpirationTimeFinalizedMultiplier, + cache.DefaultExpirationTimeNonFinalizedMultiplier, + ) cacheServer := &cache.RelayerCacheServer{CacheServer: &cs} return ctx, cacheServer } @@ -85,11 +94,12 @@ func TestCacheSetGet(t *testing.T) { Finalized: tt.finalized, RequestedBlock: request.RequestBlock, } - _, err = cacheServer.GetRelay(ctx, &messageGet) + reply, err := cacheServer.GetRelay(ctx, &messageGet) + require.NoError(t, err) if tt.valid { - require.NoError(t, err) + require.NotNil(t, reply.Reply) } else { - require.Error(t, err) + require.Nil(t, reply.Reply) } }) } @@ -169,9 +179,9 @@ func TestCacheGetWithoutSet(t *testing.T) { Finalized: tt.finalized, RequestedBlock: request.RequestBlock, } - _, err := cacheServer.GetRelay(ctx, &messageGet) - - require.Error(t, err) + reply, err := cacheServer.GetRelay(ctx, &messageGet) + require.Nil(t, reply.Reply) + require.NoError(t, err) }) } } @@ -333,7 +343,7 @@ func TestCacheSetGetLatest(t *testing.T) { require.Equal(t, cacheReply.GetReply().LatestBlock, latestBlockForRelay) } } else { - require.Error(t, err) + require.Nil(t, cacheReply.Reply) } }) } @@ -410,7 +420,7 @@ func TestCacheSetGetLatestWhenAdvancingLatest(t *testing.T) { require.Equal(t, cacheReply.GetReply().LatestBlock, latestBlockForRelay) } } else { - require.Error(t, err) + require.Nil(t, cacheReply.Reply) } request2 := shallowCopy(request) @@ -435,8 +445,9 @@ func TestCacheSetGetLatestWhenAdvancingLatest(t *testing.T) { RequestedBlock: request.RequestBlock, } // repeat our latest block get, this time we expect it to look for a newer block and fail - _, err = cacheServer.GetRelay(ctx, &messageGet) - require.Error(t, err) + reply, err := cacheServer.GetRelay(ctx, &messageGet) + require.NoError(t, err) + require.Nil(t, reply.Reply) }) } } @@ -462,7 +473,7 @@ func TestCacheSetGetJsonRPCWithID(t *testing.T) { {name: "NonFinalized With Hash", valid: true, delay: time.Millisecond, finalized: false, hash: []byte{1, 2, 3}}, {name: "NonFinalized After delay With Hash", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: false, hash: []byte{1, 2, 3}}, // Null ID in get and set {name: "Finalized No Hash, with null id in get and set", valid: true, delay: time.Millisecond, finalized: true, hash: nil, nullIdInGet: true, nullIdInSet: true}, {name: "Finalized After delay No Hash, with null id in get and set", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: true, hash: nil, nullIdInGet: true, nullIdInSet: true}, {name: "NonFinalized No Hash, with null id in get and set", valid: true, delay: time.Millisecond, finalized: false, hash: nil, nullIdInGet: true, nullIdInSet: true}, @@ -472,7
+483,7 @@ func TestCacheSetGetJsonRPCWithID(t *testing.T) { {name: "NonFinalized With Hash, with null id in get and set", valid: true, delay: time.Millisecond, finalized: false, hash: []byte{1, 2, 3}, nullIdInGet: true, nullIdInSet: true}, {name: "NonFinalized After delay With Hash, with null id in get and set", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: false, hash: []byte{1, 2, 3}, nullIdInGet: true, nullIdInSet: true}, // Null ID only in get {name: "Finalized No Hash, with null id only in get", valid: true, delay: time.Millisecond, finalized: true, hash: nil, nullIdInGet: true}, {name: "Finalized After delay No Hash, with null id only in get", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: true, hash: nil, nullIdInGet: true}, {name: "NonFinalized No Hash, with null id only in get", valid: true, delay: time.Millisecond, finalized: false, hash: nil, nullIdInGet: true}, @@ -482,7 +493,7 @@ func TestCacheSetGetJsonRPCWithID(t *testing.T) { {name: "NonFinalized With Hash, with null id only in get", valid: true, delay: time.Millisecond, finalized: false, hash: []byte{1, 2, 3}, nullIdInGet: true}, {name: "NonFinalized After delay With Hash, with null id only in get", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: false, hash: []byte{1, 2, 3}, nullIdInGet: true}, // Null ID only in set {name: "Finalized No Hash, with null id only in set", valid: true, delay: time.Millisecond, finalized: true, hash: nil, nullIdInSet: true}, {name: "Finalized After delay No Hash, with null id only in set", valid: true, delay: cache.DefaultExpirationForNonFinalized + time.Millisecond, finalized: true, hash: nil, nullIdInSet: true}, {name: "NonFinalized No Hash, with null id only in set", valid: true, delay: time.Millisecond, finalized: false, hash: nil, nullIdInSet: true}, @@ -547,20 +558,21 @@ func TestCacheSetGetJsonRPCWithID(t *testing.T) { } cacheReply, err := cacheServer.GetRelay(ctx, &messageGet) + // because we always need a cache reply, we can't return an error in any case.
+ // gRPC does not allow returning an error together with a message + require.NoError(t, err) + if tt.valid { cacheReply.Reply.Data = outputFormatter(cacheReply.Reply.Data) - require.NoError(t, err) - result := gjson.GetBytes(cacheReply.GetReply().Data, format.IDFieldName) extractedID := result.Raw - if tt.nullIdInGet { require.Equal(t, "null", extractedID) } else { require.Equal(t, strconv.FormatInt(changedID, 10), extractedID) } } else { - require.Error(t, err) + require.Nil(t, cacheReply.Reply) } }) } @@ -583,7 +595,16 @@ func TestCacheExpirationMultiplier(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { cs := cache.CacheServer{CacheMaxCost: 2 * 1024 * 1024 * 1024} - cs.InitCache(context.Background(), cache.DefaultExpirationTimeFinalized, cache.DefaultExpirationForNonFinalized, cache.DisabledFlagOption, 1, tt.multiplier) + cs.InitCache( + context.Background(), + cache.DefaultExpirationTimeFinalized, + cache.DefaultExpirationForNonFinalized, + cache.DefaultExpirationNodeErrors, + cache.DefaultExpirationBlocksHashesToHeights, + cache.DisabledFlagOption, + cache.DefaultExpirationTimeFinalizedMultiplier, + tt.multiplier, + ) cacheServer := &cache.RelayerCacheServer{CacheServer: &cs} durationActual := cacheServer.CacheServer.ExpirationForChain(cache.DefaultExpirationForNonFinalized) @@ -591,3 +612,246 @@ func TestCacheExpirationMultiplier(t *testing.T) { }) } } + +func TestCacheSetGetBlocksHashesToHeightsHappyFlow(t *testing.T) { + t.Parallel() + const ( + SET_INPUT int = iota + GET_INPUT + EXPECTED_FROM_GET + ) + + type step struct { + blockHashesToHeights []*pairingtypes.BlockHashToHeight + inputOrExpected int + } + + steps := []step{ + { + inputOrExpected: SET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{}, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + {Hash: "H2"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: spectypes.NOT_APPLICABLE, + }, + { + Hash: "H2", + Height: spectypes.NOT_APPLICABLE, + }, + }, + }, + { + inputOrExpected: SET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 1, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + {Hash: "H2"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 1, + }, + { + Hash: "H2", + Height: spectypes.NOT_APPLICABLE, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 1, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H2"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H2", + Height: spectypes.NOT_APPLICABLE, + }, + }, + }, + { + inputOrExpected: SET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H3", + Height: 3, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + {Hash: "H2"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {
+ Hash: "H1", + Height: 1, + }, + { + Hash: "H2", + Height: spectypes.NOT_APPLICABLE, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + {Hash: "H2"}, + {Hash: "H3"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 1, + }, + { + Hash: "H2", + Height: spectypes.NOT_APPLICABLE, + }, + { + Hash: "H3", + Height: 3, + }, + }, + }, + { + inputOrExpected: SET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 4, + }, + { + Hash: "H2", + Height: 2, + }, + { + Hash: "H5", + Height: 7, + }, + }, + }, + { + inputOrExpected: GET_INPUT, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + {Hash: "H1"}, + {Hash: "H2"}, + {Hash: "H3"}, + {Hash: "H5"}, + }, + }, + { + inputOrExpected: EXPECTED_FROM_GET, + blockHashesToHeights: []*pairingtypes.BlockHashToHeight{ + { + Hash: "H1", + Height: 4, + }, + { + Hash: "H2", + Height: 2, + }, + { + Hash: "H3", + Height: 3, + }, + { + Hash: "H5", + Height: 7, + }, + }, + }, + } + + t.Run("run cache steps", func(t *testing.T) { + ctx, cacheServer := initTest() + request := getRequest(1230, []byte(StubSig), StubApiInterface) + + var lastCacheResult []*pairingtypes.BlockHashToHeight + for stepNum, step := range steps { + switch step.inputOrExpected { + case SET_INPUT: + messageSet := pairingtypes.RelayCacheSet{ + RequestHash: HashRequest(t, request, StubChainID), + BlockHash: []byte("123456789"), + ChainId: StubChainID, + Response: &pairingtypes.RelayReply{}, + Finalized: true, + RequestedBlock: request.RequestBlock, + BlocksHashesToHeights: step.blockHashesToHeights, + } + + _, err := cacheServer.SetRelay(ctx, &messageSet) + require.NoError(t, err, "step: %d", stepNum) + + // sleep to make sure it's in the cache + time.Sleep(3 * time.Millisecond) + case GET_INPUT: + messageGet := pairingtypes.RelayCacheGet{ + RequestHash: HashRequest(t, request, StubChainID), + BlockHash: []byte("123456789"), + ChainId: StubChainID, + Finalized: true, + RequestedBlock: request.RequestBlock, + BlocksHashesToHeights: step.blockHashesToHeights, + } + + cacheResult, err := cacheServer.GetRelay(ctx, &messageGet) + require.NoError(t, err, "step: %d", stepNum) + lastCacheResult = cacheResult.BlocksHashesToHeights + case EXPECTED_FROM_GET: + require.Equal(t, step.blockHashesToHeights, lastCacheResult, "step: %d", stepNum) + } + } + }) +} diff --git a/ecosystem/cache/command.go b/ecosystem/cache/command.go index 6a5c9972dd..c138a59134 100644 --- a/ecosystem/cache/command.go +++ b/ecosystem/cache/command.go @@ -44,6 +44,7 @@ longer DefaultExpirationForNonFinalized will reduce sync QoS for "latest" reques cacheCmd.Flags().Duration(ExpirationNonFinalizedFlagName, DefaultExpirationForNonFinalized, "how long does a cache entry lasts in the cache for a non finalized entry") cacheCmd.Flags().Float64(ExpirationTimeFinalizedMultiplierFlagName, DefaultExpirationTimeFinalizedMultiplier, "Multiplier for finalized cache entry expiration. 1 means no change (default), 1.2 means 20% longer.") cacheCmd.Flags().Float64(ExpirationTimeNonFinalizedMultiplierFlagName, DefaultExpirationTimeNonFinalizedMultiplier, "Multiplier for non-finalized cache entry expiration. 
1 means no change (default), 1.2 means 20% longer.") + cacheCmd.Flags().Duration(ExpirationBlocksHashesToHeightsFlagName, DefaultExpirationBlocksHashesToHeights, "how long a cache entry lasts in the cache for a block hash to height entry") cacheCmd.Flags().Duration(ExpirationNodeErrorsOnFinalizedFlagName, DefaultExpirationNodeErrors, "how long does a cache entry lasts in the cache for a finalized node error entry") cacheCmd.Flags().String(FlagMetricsAddress, DisabledFlagOption, "address to listen to prometheus metrics 127.0.0.1:5555, later you can curl http://127.0.0.1:5555/metrics") cacheCmd.Flags().Int64(FlagCacheSizeName, 2*1024*1024*1024, "the maximal amount of entries to save") diff --git a/ecosystem/cache/handlers.go b/ecosystem/cache/handlers.go index 9d4c3a3f73..49cbd6adba 100644 --- a/ecosystem/cache/handlers.go +++ b/ecosystem/cache/handlers.go @@ -85,12 +85,34 @@ func (s *RelayerCacheServer) getSeenBlockForSharedStateMode(chainId string, shar return 0 } +func (s *RelayerCacheServer) getBlockHeightsFromHashes(chainId string, hashes []*pairingtypes.BlockHashToHeight) []*pairingtypes.BlockHashToHeight { + for _, hashToHeight := range hashes { + formattedKey := s.formatChainIdWithHashKey(chainId, hashToHeight.Hash) + value, found := getNonExpiredFromCache(s.CacheServer.blocksHashesToHeightsCache, formattedKey) + if found { + if cacheValue, ok := value.(int64); ok { + hashToHeight.Height = cacheValue + } + } else { + hashToHeight.Height = spectypes.NOT_APPLICABLE + } + } + return hashes +} + func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairingtypes.RelayCacheGet) (*pairingtypes.CacheRelayReply, error) { cacheReply := &pairingtypes.CacheRelayReply{} var cacheReplyTmp *pairingtypes.CacheRelayReply var err error var seenBlock int64 + // validating that if we had an error, we do not return a reply. + defer func() { + if err != nil { + cacheReply.Reply = nil + } + }() + originalRequestedBlock := relayCacheGet.RequestedBlock // save requested block prior to swap if originalRequestedBlock < 0 { // we need to fetch stored latest block information. getLatestBlock := s.getLatestBlock(latestBlockKey(relayCacheGet.ChainId, "")) @@ -104,11 +126,15 @@ func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairin utils.Attribute{Key: "requested_block_parsed", Value: relayCacheGet.RequestedBlock}, utils.Attribute{Key: "seen_block", Value: relayCacheGet.SeenBlock}, ) + + var blockHashes []*pairingtypes.BlockHashToHeight if relayCacheGet.RequestedBlock >= 0 { // we can only fetch - // check seen block is larger than our requested block, we don't need to fetch seen block prior as its already larger than requested block + // we don't need to fetch the seen block first, as it's already larger than the requested block waitGroup := sync.WaitGroup{} - waitGroup.Add(2) // currently we have two groups getRelayInner and getSeenBlock - // fetch all reads at the same time. + waitGroup.Add(3) // currently we have three groups: getRelayInner, getSeenBlock and getBlockHeightsFromHashes + + // fetch all reads at the same time: + // fetch the cache entry go func() { defer waitGroup.Done() cacheReplyTmp, err = s.getRelayInner(relayCacheGet) @@ -116,6 +142,8 @@ func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairin cacheReply = cacheReplyTmp // set cache reply only if its not nil, as we need to store seen block in it.
} }() + + // fetch seen block go func() { defer waitGroup.Done() // set seen block if required @@ -124,8 +152,16 @@ func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairin relayCacheGet.SeenBlock = seenBlock // update state. } }() + + // fetch block hashes + go func() { + defer waitGroup.Done() + blockHashes = s.getBlockHeightsFromHashes(relayCacheGet.ChainId, relayCacheGet.BlocksHashesToHeights) + }() + // wait for all reads to complete before moving forward waitGroup.Wait() + if err == nil { // in case we got a hit validate seen block of the reply. // validate that the response seen block is larger or equal to our expectations. if cacheReply.SeenBlock < lavaslices.Min([]int64{relayCacheGet.SeenBlock, relayCacheGet.RequestedBlock}) { // TODO unitest this. @@ -138,6 +174,7 @@ func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairin ) } } + // set seen block. if relayCacheGet.SeenBlock > cacheReply.SeenBlock { cacheReply.SeenBlock = relayCacheGet.SeenBlock @@ -148,22 +185,32 @@ func (s *RelayerCacheServer) GetRelay(ctx context.Context, relayCacheGet *pairin utils.LogAttr("requested block", relayCacheGet.RequestedBlock), utils.LogAttr("request_hash", string(relayCacheGet.RequestHash)), ) + // even if we don't have information on the requested block, we can still check if we have data on the block hash array. + blockHashes = s.getBlockHeightsFromHashes(relayCacheGet.ChainId, relayCacheGet.BlocksHashesToHeights) + } + + cacheReply.BlocksHashesToHeights = blockHashes + if blockHashes != nil { + utils.LavaFormatDebug("block hashes:", utils.LogAttr("hashes", blockHashes)) } // add prometheus metrics asynchronously + cacheHit := cacheReply.Reply != nil go func() { cacheMetricsContext, cancel := context.WithTimeout(context.Background(), time.Second) defer cancel() - var hit bool - if err != nil { - s.cacheMiss(cacheMetricsContext, err) - } else { - hit = true + + if cacheHit { s.cacheHit(cacheMetricsContext) + } else { + s.cacheMiss(cacheMetricsContext, err) } - s.CacheServer.CacheMetrics.AddApiSpecific(originalRequestedBlock, relayCacheGet.ChainId, hit) + + s.CacheServer.CacheMetrics.AddApiSpecific(originalRequestedBlock, relayCacheGet.ChainId, cacheHit) }() - return cacheReply, err + // no matter what, we return a nil error from the cache, as we need additional info even on a cache miss, + // such as the block hashes array, seen block, etc. + return cacheReply, nil } // formatHashKey formats the hash key by adding latestBlock information. @@ -173,6 +220,10 @@ func (s *RelayerCacheServer) formatHashKey(hash []byte, parsedRequestedBlock int return hash } +func (s *RelayerCacheServer) formatChainIdWithHashKey(chainId, hash string) string { + return chainId + "_" + hash +} + func (s *RelayerCacheServer) getRelayInner(relayCacheGet *pairingtypes.RelayCacheGet) (*pairingtypes.CacheRelayReply, error) { // cache key is compressed from: // 1. Request hash including all the information inside RelayPrivateData (Salt can cause issues if not dealt with on consumer side.)
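A note on the GetRelay hunks above: a cache miss is no longer signaled through the gRPC error, so callers have to test whether reply.Reply is nil. Below is a minimal caller-side sketch of the new contract, assuming the generated pairingtypes.RelayerCacheClient interface; the helper itself is hypothetical and not part of this diff.

package example

import (
	"context"
	"fmt"

	pairingtypes "github.com/lavanet/lava/v2/x/pairing/types"
)

// lookupRelay is an illustrative helper: err now signals transport-level
// failures only, while a cache miss arrives as a nil Reply.
func lookupRelay(ctx context.Context, client pairingtypes.RelayerCacheClient, req *pairingtypes.RelayCacheGet) {
	reply, err := client.GetRelay(ctx, req)
	if err != nil {
		fmt.Println("cache unreachable:", err) // transport failure, not a miss
		return
	}
	if reply.Reply != nil {
		fmt.Println("cache hit, latest block:", reply.Reply.LatestBlock)
	} else {
		fmt.Println("cache miss, seen block still populated:", reply.SeenBlock)
	}
	// hash->height mappings are returned on hits and misses alike
	for _, hashToHeight := range reply.BlocksHashesToHeights {
		fmt.Printf("hash %s -> height %d\n", hashToHeight.Hash, hashToHeight.Height)
	}
}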
@@ -249,6 +300,15 @@ func (s *RelayerCacheServer) setSeenBlockOnSharedStateMode(chainId, sharedStateI s.performInt64WriteWithValidationAndRetry(get, set, seenBlock) } +func (s *RelayerCacheServer) setBlocksHashesToHeights(chainId string, blocksHashesToHeights []*pairingtypes.BlockHashToHeight) { + for _, hashToHeight := range blocksHashesToHeights { + if hashToHeight.Height >= 0 { + formattedKey := s.formatChainIdWithHashKey(chainId, hashToHeight.Hash) + s.CacheServer.blocksHashesToHeightsCache.SetWithTTL(formattedKey, hashToHeight.Height, 1, s.CacheServer.ExpirationBlocksHashesToHeights) + } + } +} + func (s *RelayerCacheServer) SetRelay(ctx context.Context, relayCacheSet *pairingtypes.RelayCacheSet) (*emptypb.Empty, error) { if relayCacheSet.RequestedBlock < 0 { return nil, utils.LavaFormatError("invalid relay cache set data, request block is negative", nil, utils.Attribute{Key: "requestBlock", Value: relayCacheSet.RequestedBlock}) @@ -265,6 +325,7 @@ func (s *RelayerCacheServer) SetRelay(ctx context.Context, relayCacheSet *pairin utils.Attribute{Key: "requestHash", Value: string(relayCacheSet.BlockHash)}, utils.Attribute{Key: "latestKnownBlock", Value: string(relayCacheSet.BlockHash)}, utils.Attribute{Key: "IsNodeError", Value: relayCacheSet.IsNodeError}, + utils.Attribute{Key: "BlocksHashesToHeights", Value: relayCacheSet.BlocksHashesToHeights}, ) // finalized entries can stay there if relayCacheSet.Finalized { @@ -282,6 +343,7 @@ func (s *RelayerCacheServer) SetRelay(ctx context.Context, relayCacheSet *pairin // Setting the seen block for shared state. s.setSeenBlockOnSharedStateMode(relayCacheSet.ChainId, relayCacheSet.SharedStateId, latestKnownBlock) s.setLatestBlock(latestBlockKey(relayCacheSet.ChainId, ""), latestKnownBlock) + s.setBlocksHashesToHeights(relayCacheSet.ChainId, relayCacheSet.BlocksHashesToHeights) return &emptypb.Empty{}, nil } diff --git a/ecosystem/cache/server.go b/ecosystem/cache/server.go index 53fcce34b2..5875a2d856 100644 --- a/ecosystem/cache/server.go +++ b/ecosystem/cache/server.go @@ -29,11 +29,13 @@ const ( ExpirationTimeFinalizedMultiplierFlagName = "expiration-multiplier" ExpirationNonFinalizedFlagName = "expiration-non-finalized" ExpirationTimeNonFinalizedMultiplierFlagName = "expiration-non-finalized-multiplier" + ExpirationBlocksHashesToHeightsFlagName = "expiration-blocks-hashes-to-heights" ExpirationNodeErrorsOnFinalizedFlagName = "expiration-finalized-node-errors" FlagCacheSizeName = "max-items" DefaultExpirationForNonFinalized = 500 * time.Millisecond DefaultExpirationTimeFinalizedMultiplier = 1.0 DefaultExpirationTimeNonFinalizedMultiplier = 1.0 + DefaultExpirationBlocksHashesToHeights = 48 * time.Hour DefaultExpirationTimeFinalized = time.Hour DefaultExpirationNodeErrors = 250 * time.Millisecond CacheNumCounters = 100000000 // expect 10M items @@ -41,31 +43,48 @@ const ( ) type CacheServer struct { - finalizedCache *ristretto.Cache - tempCache *ristretto.Cache - ExpirationFinalized time.Duration - ExpirationNonFinalized time.Duration - ExpirationNodeErrors time.Duration + finalizedCache *ristretto.Cache + tempCache *ristretto.Cache // cache for temporary inputs, such as latest blocks + blocksHashesToHeightsCache *ristretto.Cache + ExpirationFinalized time.Duration + ExpirationNonFinalized time.Duration + ExpirationNodeErrors time.Duration + ExpirationBlocksHashesToHeights time.Duration CacheMetrics *CacheMetrics CacheMaxCost int64 } -func (cs *CacheServer) InitCache(ctx context.Context, expiration time.Duration, expirationNonFinalized 
time.Duration, metricsAddr string, expirationFinalizedMultiplier float64, expirationNonFinalizedMultiplier float64) { +func (cs *CacheServer) InitCache( + ctx context.Context, + expiration time.Duration, + expirationNonFinalized time.Duration, + expirationNodeErrorsOnFinalized time.Duration, + expirationBlocksHashesToHeights time.Duration, + metricsAddr string, + expirationFinalizedMultiplier float64, + expirationNonFinalizedMultiplier float64, +) { cs.ExpirationFinalized = time.Duration(float64(expiration) * expirationFinalizedMultiplier) cs.ExpirationNonFinalized = time.Duration(float64(expirationNonFinalized) * expirationNonFinalizedMultiplier) + cs.ExpirationNodeErrors = expirationNodeErrorsOnFinalized + cs.ExpirationBlocksHashesToHeights = expirationBlocksHashesToHeights - cache, err := ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: cs.CacheMaxCost, BufferItems: 64}) + var err error + cs.tempCache, err = ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: cs.CacheMaxCost, BufferItems: 64}) if err != nil { utils.LavaFormatFatal("could not create cache", err) } - cs.tempCache = cache - cache, err = ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: cs.CacheMaxCost, BufferItems: 64}) + cs.finalizedCache, err = ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: cs.CacheMaxCost, BufferItems: 64}) if err != nil { utils.LavaFormatFatal("could not create finalized cache", err) } - cs.finalizedCache = cache + + cs.blocksHashesToHeightsCache, err = ristretto.NewCache(&ristretto.Config{NumCounters: CacheNumCounters, MaxCost: cs.CacheMaxCost, BufferItems: 64}) + if err != nil { + utils.LavaFormatFatal("could not create blocks hashes to heights cache", err) + } // initialize prometheus cs.CacheMetrics = NewCacheMetricsServer(metricsAddr) @@ -183,6 +202,16 @@ func Server( utils.LavaFormatFatal("failed to read flag", err, utils.Attribute{Key: "flag", Value: ExpirationNonFinalizedFlagName}) } + expirationNodeErrorsOnFinalized, err := flags.GetDuration(ExpirationNodeErrorsOnFinalizedFlagName) + if err != nil { + utils.LavaFormatFatal("failed to read flag", err, utils.Attribute{Key: "flag", Value: ExpirationNodeErrorsOnFinalizedFlagName}) + } + + expirationBlocksHashesToHeights, err := flags.GetDuration(ExpirationBlocksHashesToHeightsFlagName) + if err != nil { + utils.LavaFormatFatal("failed to read flag", err, utils.Attribute{Key: "flag", Value: ExpirationBlocksHashesToHeightsFlagName}) + } + expirationFinalizedMultiplier, err := flags.GetFloat64(ExpirationTimeFinalizedMultiplierFlagName) if err != nil { utils.LavaFormatFatal("failed to read flag", err, utils.Attribute{Key: "flag", Value: ExpirationTimeFinalizedMultiplierFlagName}) @@ -199,7 +228,7 @@ func Server( } cs := CacheServer{CacheMaxCost: cacheMaxCost} - cs.InitCache(ctx, expiration, expirationNonFinalized, metricsAddr, expirationFinalizedMultiplier, expirationNonFinalizedMultiplier) + cs.InitCache(ctx, expiration, expirationNonFinalized, expirationNodeErrorsOnFinalized, expirationBlocksHashesToHeights, metricsAddr, expirationFinalizedMultiplier, expirationNonFinalizedMultiplier) // TODO: have a state tracker cs.Serve(ctx, listenAddr) } diff --git a/protocol/chainlib/common_test_utils.go b/protocol/chainlib/common_test_utils.go index 2782799b23..1e4db5dc47 100644 --- a/protocol/chainlib/common_test_utils.go +++ b/protocol/chainlib/common_test_utils.go @@ -26,6 +26,7 @@ import (
"github.com/lavanet/lava/v2/protocol/lavasession" testcommon "github.com/lavanet/lava/v2/testutil/common" keepertest "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" plantypes "github.com/lavanet/lava/v2/x/plans/types" spectypes "github.com/lavanet/lava/v2/x/spec/types" "github.com/stretchr/testify/require" @@ -126,7 +127,7 @@ func CreateChainLibMocks( ) (cpar ChainParser, crout ChainRouter, cfetc chaintracker.ChainFetcher, closeServer func(), endpointRet *lavasession.RPCProviderEndpoint, errRet error) { utils.SetGlobalLoggingLevel("debug") closeServer = nil - spec, err := keepertest.GetASpec(specIndex, getToTopMostPath, nil, nil) + spec, err := specutils.GetASpec(specIndex, getToTopMostPath, nil, nil) if err != nil { return nil, nil, nil, nil, nil, err } @@ -250,7 +251,7 @@ func SetupForTests(t *testing.T, numOfProviders int, specID string, getToTopMost ts.Providers = append(ts.Providers, testcommon.CreateNewAccount(ts.Ctx, *ts.Keepers, balance)) } sdkContext := sdk.UnwrapSDKContext(ts.Ctx) - spec, err := keepertest.GetASpec(specID, getToTopMostPath, &sdkContext, &ts.Keepers.Spec) + spec, err := specutils.GetASpec(specID, getToTopMostPath, &sdkContext, &ts.Keepers.Spec) if err != nil { require.NoError(t, err) } diff --git a/protocol/chainlib/consumer_ws_subscription_manager_test.go b/protocol/chainlib/consumer_ws_subscription_manager_test.go index 36947e53ac..81a59fea87 100644 --- a/protocol/chainlib/consumer_ws_subscription_manager_test.go +++ b/protocol/chainlib/consumer_ws_subscription_manager_test.go @@ -154,7 +154,7 @@ func TestConsumerWSSubscriptionManagerParallelSubscriptionsOnSameDappIdIp(t *tes firstReply, repliesChan, err = manager.StartSubscription(ctx, protocolMessage1, dapp, ip, uniqueIdentifiers[index], nil) go func() { for subMsg := range repliesChan { - utils.LavaFormatInfo("got reply for index", utils.LogAttr("index", index)) + // utils.LavaFormatInfo("got reply for index", utils.LogAttr("index", index)) require.Equal(t, string(play.subscriptionFirstReply1), string(subMsg.Data)) } }() diff --git a/protocol/chainlib/jsonRPC_test.go b/protocol/chainlib/jsonRPC_test.go index a110b22bca..15f9db0dc2 100644 --- a/protocol/chainlib/jsonRPC_test.go +++ b/protocol/chainlib/jsonRPC_test.go @@ -13,7 +13,7 @@ import ( "github.com/lavanet/lava/v2/protocol/chainlib/chainproxy/rpcInterfaceMessages" "github.com/lavanet/lava/v2/protocol/chainlib/extensionslib" "github.com/lavanet/lava/v2/protocol/common" - keepertest "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" plantypes "github.com/lavanet/lava/v2/x/plans/types" spectypes "github.com/lavanet/lava/v2/x/spec/types" "github.com/stretchr/testify/assert" @@ -253,7 +253,7 @@ func TestExtensions(t *testing.T) { configuredExtensions := map[string]struct{}{ "archive": {}, } - spec, err := keepertest.GetASpec(specname, "../../", nil, nil) + spec, err := specutils.GetASpec(specname, "../../", nil, nil) require.NoError(t, err) chainParser.SetPolicy(&plantypes.Policy{ChainPolicies: []plantypes.ChainPolicy{{ChainId: specname, Requirements: []plantypes.ChainRequirement{{Collection: spectypes.CollectionData{ApiInterface: "jsonrpc"}, Extensions: []string{"archive"}}}}}}, specname, "jsonrpc") diff --git a/protocol/common/cobra_common.go b/protocol/common/cobra_common.go index 19a64487d2..d7daa95e59 100644 --- a/protocol/common/cobra_common.go +++ b/protocol/common/cobra_common.go @@ -32,6 +32,7 @@ const ( // Disable relay retries when we get node 
errors. // This feature is suppose to help with successful relays in some chains that return node errors on rare race conditions on the serviced chains. DisableRetryOnNodeErrorsFlag = "disable-retry-on-node-error" + UseOfflineSpecFlag = "use-offline-spec" // allows the user to manually load a spec by providing a path; useful to test spec changes before they hit the blockchain ) const ( @@ -55,6 +56,7 @@ type ConsumerCmdFlags struct { DebugRelays bool // enables debug mode for relays DisableConflictTransactions bool // disable conflict transactions DisableRetryOnNodeErrors bool // disable retries on node errors + OfflineSpecPath string // path to the spec file, works only when bootstrapping a single chain. } // default rolling logs behavior (if enabled) will store 3 files each 100MB for up to 1 day every time. diff --git a/protocol/rpcconsumer/rpcconsumer.go b/protocol/rpcconsumer/rpcconsumer.go index 8cee4014e9..c5b5a32e5b 100644 --- a/protocol/rpcconsumer/rpcconsumer.go +++ b/protocol/rpcconsumer/rpcconsumer.go @@ -29,6 +29,7 @@ import ( "github.com/lavanet/lava/v2/protocol/statetracker/updaters" "github.com/lavanet/lava/v2/protocol/upgrade" "github.com/lavanet/lava/v2/utils" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/utils/rand" "github.com/lavanet/lava/v2/utils/sigs" conflicttypes "github.com/lavanet/lava/v2/x/conflict/types" @@ -214,8 +215,19 @@ func (rpcc *RPCConsumer) Start(ctx context.Context, options *rpcConsumerStartOpt } else { policyUpdaters.Store(rpcEndpoint.ChainID, updaters.NewPolicyUpdater(chainID, consumerStateTracker, consumerAddr.String(), chainParser, *rpcEndpoint)) } - // register for spec updates - err = rpcc.consumerStateTracker.RegisterForSpecUpdates(ctx, chainParser, *rpcEndpoint) + + if options.cmdFlags.OfflineSpecPath != "" { + // offline spec mode. + parsedOfflineSpec, loadError := specutils.GetSpecFromPath(options.cmdFlags.OfflineSpecPath, rpcEndpoint.ChainID, nil, nil) + if loadError != nil { + err = utils.LavaFormatError("failed loading offline spec", loadError, utils.LogAttr("spec_path", options.cmdFlags.OfflineSpecPath), utils.LogAttr("spec_id", rpcEndpoint.ChainID)) + } else { + utils.LavaFormatInfo("Loaded offline spec successfully", utils.LogAttr("spec_path", options.cmdFlags.OfflineSpecPath), utils.LogAttr("chain_id", parsedOfflineSpec.Index)) + chainParser.SetSpec(parsedOfflineSpec) + } + } else { + // register for spec updates + err = rpcc.consumerStateTracker.RegisterForSpecUpdates(ctx, chainParser, *rpcEndpoint) + } if err != nil { err = utils.LavaFormatError("failed registering for spec updates", err, utils.Attribute{Key: "endpoint", Value: rpcEndpoint}) errCh <- err @@ -561,7 +573,6 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 } maxConcurrentProviders := viper.GetUint(common.MaximumConcurrentProvidersFlagName) - consumerPropagatedFlags := common.ConsumerCmdFlags{ HeadersFlag: viper.GetString(common.CorsHeadersFlag), CredentialsFlag: viper.GetString(common.CorsCredentialsFlag), @@ -573,6 +584,12 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 DebugRelays: viper.GetBool(DebugRelaysFlagName), DisableConflictTransactions: viper.GetBool(common.DisableConflictTransactionsFlag), DisableRetryOnNodeErrors: viper.GetBool(common.DisableRetryOnNodeErrorsFlag), + OfflineSpecPath: viper.GetString(common.UseOfflineSpecFlag), + } + + // validate that the user does not provide a multi-chain setup when using the offline spec feature.
+ if consumerPropagatedFlags.OfflineSpecPath != "" && len(rpcEndpoints) > 1 { + utils.LavaFormatFatal("offline spec modifications are supported only in single chain bootstrapping", nil, utils.LogAttr("len(rpcEndpoints)", len(rpcEndpoints)), utils.LogAttr("rpcEndpoints", rpcEndpoints)) } rpcConsumerSharedState := viper.GetBool(common.SharedStateFlag) @@ -615,6 +632,7 @@ rpcconsumer consumer_examples/full_consumer_example.yml --cache-be "127.0.0.1:77 cmdRPCConsumer.Flags().Bool(common.DisableConflictTransactionsFlag, false, "disabling conflict transactions, this flag should not be used as it harms the network's data reliability and therefore the service.") cmdRPCConsumer.Flags().DurationVar(&updaters.TimeOutForFetchingLavaBlocks, common.TimeOutForFetchingLavaBlocksFlag, time.Second*5, "setting the timeout for fetching lava blocks") cmdRPCConsumer.Flags().Bool(common.DisableRetryOnNodeErrorsFlag, false, "Disable relay retries on node errors, prevent the rpcconsumer trying a different provider") + cmdRPCConsumer.Flags().String(common.UseOfflineSpecFlag, "", "load an offline spec from the provided spec file path, used to test specs before they are proposed on chain") common.AddRollingLogConfig(cmdRPCConsumer) return cmdRPCConsumer diff --git a/protocol/rpcprovider/rpcprovider_server.go b/protocol/rpcprovider/rpcprovider_server.go index 272836f8c9..4de897dd9b 100644 --- a/protocol/rpcprovider/rpcprovider_server.go +++ b/protocol/rpcprovider/rpcprovider_server.go @@ -367,7 +367,18 @@ func (rpcps *RPCProviderServer) ValidateRequest(chainMessage chainlib.ChainMessa utils.Attribute{Key: "provider_requested_block", Value: reqBlock}, utils.Attribute{Key: "consumer_requested_block", Value: request.RelayData.RequestBlock}, utils.Attribute{Key: "GUID", Value: ctx}) - return utils.LavaFormatError("requested block mismatch between consumer and provider", nil, utils.LogAttr("method", chainMessage.GetApi().Name), utils.Attribute{Key: "provider_parsed_block_pre_update", Value: providerRequestedBlockPreUpdate}, utils.Attribute{Key: "provider_requested_block", Value: reqBlock}, utils.Attribute{Key: "consumer_requested_block", Value: request.RelayData.RequestBlock}, utils.Attribute{Key: "GUID", Value: ctx}, utils.Attribute{Key: "metadata", Value: request.RelayData.Metadata}) + // TODO: we need to return an error here; this was disabled so relays will pass, but it will cause data reliability issues. + // Once we understand the issue, return the error.
+ utils.LavaFormatError("requested block mismatch between consumer and provider", nil, + utils.LogAttr("request data", string(request.RelayData.Data)), + utils.LogAttr("request path", request.RelayData.ApiUrl), + utils.LogAttr("method", chainMessage.GetApi().Name), + utils.Attribute{Key: "provider_parsed_block_pre_update", Value: providerRequestedBlockPreUpdate}, + utils.Attribute{Key: "provider_requested_block", Value: reqBlock}, + utils.Attribute{Key: "consumer_requested_block", Value: request.RelayData.RequestBlock}, + utils.Attribute{Key: "GUID", Value: ctx}, + utils.Attribute{Key: "metadata", Value: request.RelayData.Metadata}, + ) } } return nil diff --git a/testutil/common/tester.go b/testutil/common/tester.go index fd5ca31827..d6437ac909 100644 --- a/testutil/common/tester.go +++ b/testutil/common/tester.go @@ -16,6 +16,7 @@ import ( stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" testkeeper "github.com/lavanet/lava/v2/testutil/keeper" "github.com/lavanet/lava/v2/utils" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/utils/lavaslices" "github.com/lavanet/lava/v2/utils/sigs" dualstakingante "github.com/lavanet/lava/v2/x/dualstaking/ante" @@ -1127,7 +1128,7 @@ func (ts *Tester) SetupForTests(getToTopMostPath string, specId string, validato } sdkContext := sdk.UnwrapSDKContext(ts.Ctx) - spec, err := testkeeper.GetASpec(specId, getToTopMostPath, &sdkContext, &ts.Keepers.Spec) + spec, err := specutils.GetASpec(specId, getToTopMostPath, &sdkContext, &ts.Keepers.Spec) if err != nil { return err } diff --git a/testutil/keeper/spec.go b/utils/keeper/spec.go similarity index 56% rename from testutil/keeper/spec.go rename to utils/keeper/spec.go index 3574f15040..f76965c5a4 100644 --- a/testutil/keeper/spec.go +++ b/utils/keeper/spec.go @@ -5,7 +5,6 @@ import ( "encoding/json" "fmt" "os" - "strings" "testing" tmdb "github.com/cometbft/cometbft-db" @@ -67,7 +66,20 @@ func specKeeper() (*keeper.Keeper, sdk.Context, error) { return k, ctx, nil } -func GetASpec(specIndex, getToTopMostPath string, ctxArg *sdk.Context, keeper *keeper.Keeper) (specRet spectypes.Spec, err error) { +func decodeProposal(path string) (utils.SpecAddProposalJSON, error) { + proposal := utils.SpecAddProposalJSON{} + contents, err := os.ReadFile(path) + if err != nil { + return proposal, err + } + decoder := json.NewDecoder(bytes.NewReader(contents)) + decoder.DisallowUnknownFields() // This will make the unmarshal fail if there are unused fields + + err = decoder.Decode(&proposal) + return proposal, err +} + +func GetSpecFromPath(path string, specIndex string, ctxArg *sdk.Context, keeper *keeper.Keeper) (specRet spectypes.Spec, err error) { var ctx sdk.Context if keeper == nil || ctxArg == nil { keeper, ctx, err = specKeeper() @@ -77,31 +89,48 @@ func GetASpec(specIndex, getToTopMostPath string, ctxArg *sdk.Context, keeper *k } else { ctx = *ctxArg } - proposalFile := 
"./cookbook/specs/ibc.json,./cookbook/specs/cosmoswasm.json,./cookbook/specs/tendermint.json,./cookbook/specs/cosmossdk.json,./cookbook/specs/cosmossdk_full.json,./cookbook/specs/ethereum.json,./cookbook/specs/cosmoshub.json,./cookbook/specs/lava.json,./cookbook/specs/osmosis.json,./cookbook/specs/fantom.json,./cookbook/specs/celo.json,./cookbook/specs/optimism.json,./cookbook/specs/arbitrum.json,./cookbook/specs/starknet.json,./cookbook/specs/aptos.json,./cookbook/specs/juno.json,./cookbook/specs/polygon.json,./cookbook/specs/evmos.json,./cookbook/specs/base.json,./cookbook/specs/canto.json,./cookbook/specs/sui.json,./cookbook/specs/solana.json,./cookbook/specs/bsc.json,./cookbook/specs/axelar.json,./cookbook/specs/avalanche.json,./cookbook/specs/fvm.json" - for _, fileName := range strings.Split(proposalFile, ",") { - proposal := utils.SpecAddProposalJSON{} - contents, err := os.ReadFile(getToTopMostPath + fileName) + proposal, err := decodeProposal(path) + if err != nil { + return spectypes.Spec{}, err + } + + for _, spec := range proposal.Proposal.Specs { + keeper.SetSpec(ctx, spec) + if specIndex != spec.Index { + continue + } + fullspec, err := keeper.ExpandSpec(ctx, spec) if err != nil { return spectypes.Spec{}, err } - decoder := json.NewDecoder(bytes.NewReader(contents)) - decoder.DisallowUnknownFields() // This will make the unmarshal fail if there are unused fields + return fullspec, nil + } + return spectypes.Spec{}, fmt.Errorf("spec not found %s", path) +} - if err := decoder.Decode(&proposal); err != nil { +func GetASpec(specIndex, getToTopMostPath string, ctxArg *sdk.Context, keeper *keeper.Keeper) (specRet spectypes.Spec, err error) { + var ctx sdk.Context + if keeper == nil || ctxArg == nil { + keeper, ctx, err = specKeeper() + if err != nil { return spectypes.Spec{}, err } - - for _, spec := range proposal.Proposal.Specs { - keeper.SetSpec(ctx, spec) - if specIndex != spec.Index { - continue - } - fullspec, err := keeper.ExpandSpec(ctx, spec) - if err != nil { - return spectypes.Spec{}, err - } - return fullspec, nil + } else { + ctx = *ctxArg + } + proposalDirectory := "cookbook/specs/" + proposalFiles := []string{ + "ibc.json", "cosmoswasm.json", "tendermint.json", "cosmossdk.json", "cosmossdk_full.json", + "ethereum.json", "cosmoshub.json", "lava.json", "osmosis.json", "fantom.json", "celo.json", + "optimism.json", "arbitrum.json", "starknet.json", "aptos.json", "juno.json", "polygon.json", + "evmos.json", "base.json", "canto.json", "sui.json", "solana.json", "bsc.json", "axelar.json", + "avalanche.json", "fvm.json", "near.json", + } + for _, fileName := range proposalFiles { + spec, err := GetSpecFromPath(getToTopMostPath+proposalDirectory+fileName, specIndex, &ctx, keeper) + if err == nil { + return spec, nil } } return spectypes.Spec{}, fmt.Errorf("spec not found %s", specIndex) diff --git a/x/pairing/keeper/pairing_test.go b/x/pairing/keeper/pairing_test.go index 74165525af..a9cf57336b 100644 --- a/x/pairing/keeper/pairing_test.go +++ b/x/pairing/keeper/pairing_test.go @@ -10,6 +10,7 @@ import ( sdk "github.com/cosmos/cosmos-sdk/types" "github.com/lavanet/lava/v2/testutil/common" testkeeper "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/utils/lavaslices" "github.com/lavanet/lava/v2/utils/sigs" epochstoragetypes "github.com/lavanet/lava/v2/x/epochstorage/types" @@ -2212,7 +2213,7 @@ func TestMixBothExetensionAndAddonPairing(t *testing.T) { func TestMixSelectedProvidersAndArchivePairing(t 
*testing.T) { ts := newTester(t) ts.setupForPayments(1, 0, 0) // 1 provider, 0 client, default providers-to-pair - specEth, err := testkeeper.GetASpec("ETH1", "../../../", nil, nil) + specEth, err := specutils.GetASpec("ETH1", "../../../", nil, nil) if err != nil { require.NoError(t, err) } diff --git a/x/spec/ante/ante_test.go b/x/spec/ante/ante_test.go index 9047af0f1d..82571e5413 100644 --- a/x/spec/ante/ante_test.go +++ b/x/spec/ante/ante_test.go @@ -13,7 +13,7 @@ import ( v1 "github.com/cosmos/cosmos-sdk/x/gov/types/v1" "github.com/cosmos/gogoproto/proto" "github.com/lavanet/lava/v2/app" - testkeeper "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" plantypes "github.com/lavanet/lava/v2/x/plans/types" "github.com/lavanet/lava/v2/x/spec/ante" spectypes "github.com/lavanet/lava/v2/x/spec/types" @@ -181,7 +181,7 @@ func TestNewExpeditedProposalFilterAnteDecorator(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { - k, ctx := testkeeper.SpecKeeper(t) + k, ctx := specutils.SpecKeeper(t) params := spectypes.DefaultParams() params.AllowlistedExpeditedMsgs = []string{ proto.MessageName(&banktypes.MsgSend{}), diff --git a/x/spec/genesis_test.go b/x/spec/genesis_test.go index 8604f72bc1..9faaa7e9ae 100644 --- a/x/spec/genesis_test.go +++ b/x/spec/genesis_test.go @@ -6,8 +6,8 @@ import ( types2 "github.com/cosmos/cosmos-sdk/x/auth/types" "github.com/cosmos/gogoproto/proto" - keepertest "github.com/lavanet/lava/v2/testutil/keeper" "github.com/lavanet/lava/v2/testutil/nullify" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/x/spec" "github.com/lavanet/lava/v2/x/spec/types" "github.com/stretchr/testify/require" @@ -32,7 +32,7 @@ func TestGenesis(t *testing.T) { // this line is used by starport scaffolding # genesis/test/state } - k, ctx := keepertest.SpecKeeper(t) + k, ctx := specutils.SpecKeeper(t) spec.InitGenesis(ctx, *k, genesisState) got := spec.ExportGenesis(ctx, *k) require.NotNil(t, got) diff --git a/x/spec/keeper/grpc_query_params_test.go b/x/spec/keeper/grpc_query_params_test.go index ada5f2d3f0..5d94e82188 100644 --- a/x/spec/keeper/grpc_query_params_test.go +++ b/x/spec/keeper/grpc_query_params_test.go @@ -4,13 +4,13 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - testkeeper "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/x/spec/types" "github.com/stretchr/testify/require" ) func TestParamsQuery(t *testing.T) { - keeper, ctx := testkeeper.SpecKeeper(t) + keeper, ctx := specutils.SpecKeeper(t) wctx := sdk.WrapSDKContext(ctx) params := types.DefaultParams() keeper.SetParams(ctx, params) diff --git a/x/spec/keeper/grpc_query_spec_test.go b/x/spec/keeper/grpc_query_spec_test.go index 0b1e33d1a8..ad97b52fbf 100644 --- a/x/spec/keeper/grpc_query_spec_test.go +++ b/x/spec/keeper/grpc_query_spec_test.go @@ -10,8 +10,8 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/status" - keepertest "github.com/lavanet/lava/v2/testutil/keeper" "github.com/lavanet/lava/v2/testutil/nullify" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/x/spec/types" ) @@ -19,7 +19,7 @@ import ( var _ = strconv.IntSize func TestSpecQuerySingle(t *testing.T) { - keeper, ctx := keepertest.SpecKeeper(t) + keeper, ctx := specutils.SpecKeeper(t) wctx := sdk.WrapSDKContext(ctx) msgs := createNSpec(keeper, ctx, 2) for _, tc := range []struct { @@ -70,7 +70,7 @@ func TestSpecQuerySingle(t 
*testing.T) { } func TestSpecQuerySingleRaw(t *testing.T) { - keeper, ctx := keepertest.SpecKeeper(t) + keeper, ctx := specutils.SpecKeeper(t) wctx := sdk.WrapSDKContext(ctx) msgs := createNSpec(keeper, ctx, 2) @@ -98,7 +98,7 @@ func TestSpecQuerySingleRaw(t *testing.T) { } func TestSpecQueryPaginated(t *testing.T) { - keeper, ctx := keepertest.SpecKeeper(t) + keeper, ctx := specutils.SpecKeeper(t) wctx := sdk.WrapSDKContext(ctx) msgs := createNSpec(keeper, ctx, 5) diff --git a/x/spec/keeper/msg_server_test.go b/x/spec/keeper/msg_server_test.go index 241f721f95..b15805f52b 100644 --- a/x/spec/keeper/msg_server_test.go +++ b/x/spec/keeper/msg_server_test.go @@ -5,12 +5,12 @@ import ( "testing" sdk "github.com/cosmos/cosmos-sdk/types" - keepertest "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/x/spec/keeper" "github.com/lavanet/lava/v2/x/spec/types" ) func setupMsgServer(t testing.TB) (types.MsgServer, context.Context) { - k, ctx := keepertest.SpecKeeper(t) + k, ctx := specutils.SpecKeeper(t) return keeper.NewMsgServerImpl(*k), sdk.WrapSDKContext(ctx) } diff --git a/x/spec/keeper/params_test.go b/x/spec/keeper/params_test.go index 089696024e..429f0d7410 100644 --- a/x/spec/keeper/params_test.go +++ b/x/spec/keeper/params_test.go @@ -3,13 +3,13 @@ package keeper_test import ( "testing" - testkeeper "github.com/lavanet/lava/v2/testutil/keeper" + specutils "github.com/lavanet/lava/v2/utils/keeper" "github.com/lavanet/lava/v2/x/spec/types" "github.com/stretchr/testify/require" ) func TestGetParams(t *testing.T) { - k, ctx := testkeeper.SpecKeeper(t) + k, ctx := specutils.SpecKeeper(t) params := types.DefaultParams() k.SetParams(ctx, params)
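For readers unfamiliar with ristretto, the new blocksHashesToHeightsCache follows the same SetWithTTL/Get pattern as the existing caches, keyed by formatChainIdWithHashKey with a flat cost of 1 per entry. A self-contained sketch of that pattern follows; the constants and key values are illustrative, not taken from this diff.

package main

import (
	"fmt"
	"time"

	"github.com/dgraph-io/ristretto"
)

func main() {
	// mirrors blocksHashesToHeightsCache: int64 heights keyed by "<chainId>_<hash>"
	cache, err := ristretto.NewCache(&ristretto.Config{
		NumCounters: 100000, // ristretto suggests ~10x the expected number of keys
		MaxCost:     10000,  // with a flat cost of 1 per entry, this caps the entry count
		BufferItems: 64,
	})
	if err != nil {
		panic(err)
	}

	key := "LAV1" + "_" + "0xabcdef"                  // formatChainIdWithHashKey-style key
	cache.SetWithTTL(key, int64(42), 1, 48*time.Hour) // TTL plays the role of ExpirationBlocksHashesToHeights
	cache.Wait()                                      // sets are buffered asynchronously; Wait flushes them (the tests above sleep instead)

	if value, found := cache.Get(key); found {
		if height, ok := value.(int64); ok {
			fmt.Println("height for hash:", height)
		}
	}
}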