Skip to content

Commit

Permalink
[raft] add a test for ConfiguredClusters in the store (#7488)
Browse files Browse the repository at this point in the history
  • Loading branch information
luluz66 authored Sep 18, 2024
1 parent 3ba0238 commit fdd9561
Show file tree
Hide file tree
Showing 3 changed files with 29 additions and 16 deletions.
1 change: 1 addition & 0 deletions enterprise/server/raft/store/store.go
Original file line number Diff line number Diff line change
Expand Up @@ -278,6 +278,7 @@ func NewWithArgs(env environment.Env, rootDir string, nodeHost *dragonboat.NodeH
if err := nodeHost.StartOnDiskReplica(nil, false /*=join*/, s.ReplicaFactoryFn, rc); err != nil {
return nil, status.InternalErrorf("failed to start c%dn%d: %s", logInfo.ShardID, logInfo.ReplicaID, err)
}
s.configuredClusters++
} else {
replicaDescriptor := &rfpb.ReplicaDescriptor{RangeId: logInfo.ShardID, ReplicaId: logInfo.ReplicaID}
previouslyStartedReplicas = append(previouslyStartedReplicas, replicaDescriptor)
Expand Down
10 changes: 10 additions & 0 deletions enterprise/server/raft/store/store_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,16 @@ func getMembership(t *testing.T, ts *testutil.TestingStore, ctx context.Context,
return replicas
}

// TestConfiguredClusters checks that a store restarted on top of existing
// on-disk raft state reports the clusters it rediscovered at startup.
func TestConfiguredClusters(t *testing.T) {
	factory := testutil.NewStoreFactory(t)
	store := factory.NewStore(t)
	ctx := context.Background()
	factory.StartShard(t, ctx, store)

	// Stop and recreate the store; on restart it should count the
	// previously configured shards found on disk.
	store.Stop()
	factory.RecreateStore(t, store)
	require.Equal(t, 2, store.ConfiguredClusters())
}

func TestAddGetRemoveRange(t *testing.T) {
sf := testutil.NewStoreFactory(t)
s1 := sf.NewStore(t)
Expand Down
34 changes: 18 additions & 16 deletions enterprise/server/raft/testutil/testutil.go
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,6 @@ func localAddr(t *testing.T) string {

type StoreFactory struct {
rootDir string
fileDir string
gossipAddrs []string
reg registry.NodeRegistry
clock clockwork.Clock
Expand All @@ -58,7 +57,6 @@ func NewStoreFactoryWithClock(t *testing.T, clock clockwork.Clock) *StoreFactory
require.NoError(t, err)
return &StoreFactory{
rootDir: rootDir,
fileDir: fileDir,
reg: registry.NewStaticNodeRegistry(1, nil),
clock: clock,
}
Expand All @@ -74,19 +72,7 @@ func (sf *StoreFactory) Registry() registry.NodeRegistry {
return sf.reg
}

func (sf *StoreFactory) NewStore(t *testing.T) *TestingStore {
nodeAddr := localAddr(t)
gm, err := gossip.New("name-"+nodeAddr, nodeAddr, sf.gossipAddrs)
require.NoError(t, err)
sf.gossipAddrs = append(sf.gossipAddrs, nodeAddr)

ts := &TestingStore{
t: t,
gm: gm,
RaftAddress: localAddr(t),
GRPCAddress: localAddr(t),
RootDir: filepath.Join(sf.rootDir, fmt.Sprintf("store-%d", len(sf.gossipAddrs))),
}
func (sf *StoreFactory) RecreateStore(t *testing.T, ts *TestingStore) {
require.Nil(t, disk.EnsureDirectoryExists(ts.RootDir))

reg := sf.reg
Expand Down Expand Up @@ -127,7 +113,7 @@ func (sf *StoreFactory) NewStore(t *testing.T) *TestingStore {
require.NoError(t, err)
leaser := pebble.NewDBLeaser(db)
ts.leaser = leaser
store, err := store.NewWithArgs(te, ts.RootDir, nodeHost, gm, s, reg, raftListener, apiClient, ts.GRPCAddress, partitions, db, leaser)
store, err := store.NewWithArgs(te, ts.RootDir, nodeHost, ts.gm, s, reg, raftListener, apiClient, ts.GRPCAddress, partitions, db, leaser)
require.NoError(t, err)
require.NotNil(t, store)
store.Start()
Expand All @@ -136,6 +122,22 @@ func (sf *StoreFactory) NewStore(t *testing.T) *TestingStore {
t.Cleanup(func() {
ts.Stop()
})
}

// NewStore builds a TestingStore joined to the factory's gossip network,
// assigns it a unique root directory, and boots it via RecreateStore.
func (sf *StoreFactory) NewStore(t *testing.T) *TestingStore {
	addr := localAddr(t)
	gossipMgr, err := gossip.New("name-"+addr, addr, sf.gossipAddrs)
	require.NoError(t, err)
	// Record this node's address so later stores gossip with it.
	sf.gossipAddrs = append(sf.gossipAddrs, addr)

	store := &TestingStore{
		t:           t,
		gm:          gossipMgr,
		RaftAddress: localAddr(t),
		GRPCAddress: localAddr(t),
		// The directory name encodes the store's ordinal among the
		// gossip members created so far.
		RootDir: filepath.Join(sf.rootDir, fmt.Sprintf("store-%d", len(sf.gossipAddrs))),
	}
	sf.RecreateStore(t, store)
	return store
}

Expand Down

0 comments on commit fdd9561

Please sign in to comment.