From acbb9afaae4a534d77e71d8aa9a9af57e21c0567 Mon Sep 17 00:00:00 2001 From: Hu# Date: Tue, 16 Apr 2024 18:03:06 +0800 Subject: [PATCH 01/17] ci: transitioning action version from node 16 to node 20 (#8071) close tikv/pd#8070 Signed-off-by: husharp Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- .github/workflows/check.yaml | 16 +++---------- .github/workflows/label.yaml | 2 +- .github/workflows/pd-docker-image.yaml | 6 ++--- .github/workflows/pd-tests.yaml | 26 +++++++-------------- .github/workflows/tso-consistency-test.yaml | 6 ++--- .github/workflows/tso-function-test.yaml | 6 ++--- 6 files changed, 22 insertions(+), 40 deletions(-) diff --git a/.github/workflows/check.yaml b/.github/workflows/check.yaml index e2bf99c026f..3ccb2635e9f 100644 --- a/.github/workflows/check.yaml +++ b/.github/workflows/check.yaml @@ -8,21 +8,11 @@ jobs: runs-on: ubuntu-latest timeout-minutes: 20 steps: - - uses: actions/setup-go@v3 - with: - go-version: '1.21' - name: Checkout code - uses: actions/checkout@v3 - - name: Restore cache - uses: actions/cache@v3 + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - **/.dashboard_download_cache - key: ${{ runner.os }}-golang-${{ hashFiles('**/go.sum') }} - restore-keys: | - ${{ runner.os }}-golang + go-version: '1.21' - name: Make Check run: | SWAGGER=1 make build diff --git a/.github/workflows/label.yaml b/.github/workflows/label.yaml index 5ff2b895528..00438d26b63 100644 --- a/.github/workflows/label.yaml +++ b/.github/workflows/label.yaml @@ -7,7 +7,7 @@ jobs: add_labels: runs-on: ubuntu-latest steps: - - uses: actions/github-script@v4 + - uses: actions/github-script@v7 name: Add labels with: script: | diff --git a/.github/workflows/pd-docker-image.yaml b/.github/workflows/pd-docker-image.yaml index 2a04c030016..5beaa66c156 100644 --- a/.github/workflows/pd-docker-image.yaml +++ b/.github/workflows/pd-docker-image.yaml @@ -15,10 +15,10 @@ jobs: strategy: fail-fast: true steps: - - uses: actions/setup-go@v3 + - name: Checkout code + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: '1.21' - - name: Checkout code - uses: actions/checkout@v3 - name: Make run: make docker-image diff --git a/.github/workflows/pd-tests.yaml b/.github/workflows/pd-tests.yaml index 1508c1a1457..9084c7545a8 100644 --- a/.github/workflows/pd-tests.yaml +++ b/.github/workflows/pd-tests.yaml @@ -29,20 +29,11 @@ jobs: outputs: job-total: 13 steps: - - uses: actions/setup-go@v3 - with: - go-version: '1.21' - name: Checkout code - uses: actions/checkout@v3 - - name: Restore cache - uses: actions/cache@v3 + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: - path: | - ~/go/pkg/mod - ~/.cache/go-build - **/.tools - **/.dashboard_download_cache - key: ${{ runner.os }}-go-${{ matrix.worker_id }}-${{ hashFiles('**/go.sum') }} + go-version: '1.21' - name: Make Test env: WORKER_ID: ${{ matrix.worker_id }} @@ -53,20 +44,21 @@ jobs: mv covprofile covprofile_$WORKER_ID sed -i "/failpoint_binding/d" covprofile_$WORKER_ID - name: Upload coverage result ${{ matrix.worker_id }} - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v4 with: - name: cover-reports + name: cover-reports-${{ matrix.worker_id }} path: covprofile_${{ matrix.worker_id }} report-coverage: needs: chunks runs-on: ubuntu-latest steps: - name: Checkout code - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Download chunk report - uses: actions/download-artifact@v2 + uses: 
actions/download-artifact@v4 with: - name: cover-reports + pattern: cover-reports-* + merge-multiple: true - name: Merge env: TOTAL_JOBS: ${{needs.chunks.outputs.job-total}} diff --git a/.github/workflows/tso-consistency-test.yaml b/.github/workflows/tso-consistency-test.yaml index 570cbbc5da8..3cb24898a10 100644 --- a/.github/workflows/tso-consistency-test.yaml +++ b/.github/workflows/tso-consistency-test.yaml @@ -8,10 +8,10 @@ jobs: tso-consistency-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 + - name: Checkout code + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: '1.21' - - name: Checkout code - uses: actions/checkout@v3 - name: Make TSO Consistency Test run: make test-tso-consistency diff --git a/.github/workflows/tso-function-test.yaml b/.github/workflows/tso-function-test.yaml index d7780425d30..13fd6fe7df6 100644 --- a/.github/workflows/tso-function-test.yaml +++ b/.github/workflows/tso-function-test.yaml @@ -21,10 +21,10 @@ jobs: tso-function-test: runs-on: ubuntu-latest steps: - - uses: actions/setup-go@v3 + - name: Checkout code + uses: actions/checkout@v4 + - uses: actions/setup-go@v5 with: go-version: '1.21' - - name: Checkout code - uses: actions/checkout@v3 - name: Make TSO Function Test run: make test-tso-function From 4c0a862440206c387ebf1589f1fcf657fb626cd4 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Tue, 16 Apr 2024 20:49:35 +0800 Subject: [PATCH 02/17] *: make `TestStores` stable (#8078) close tikv/pd#7954 Signed-off-by: Ryan Leung Co-authored-by: Hu# --- tests/integrations/mcs/scheduling/api_test.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/integrations/mcs/scheduling/api_test.go b/tests/integrations/mcs/scheduling/api_test.go index f615e879e40..e9033e5016a 100644 --- a/tests/integrations/mcs/scheduling/api_test.go +++ b/tests/integrations/mcs/scheduling/api_test.go @@ -651,6 +651,9 @@ func (suite *apiTestSuite) checkStores(cluster *tests.TestCluster) { tests.MustPutRegion(re, cluster, 3, 6, []byte("a"), []byte("b")) for _, store := range stores { tests.MustPutStore(re, cluster, store) + if store.GetId() == 6 { + cluster.GetLeaderServer().GetRaftCluster().GetBasicCluster().UpdateStoreStatus(6) + } } // Test /stores apiServerAddr := cluster.GetLeaderServer().GetAddr() From 2dd230101346c430397947bbe1028fb46411c98b Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 17 Apr 2024 16:51:06 +0800 Subject: [PATCH 03/17] *: make `TestKeyspaceGroupMergeIntoDefault` stable (#8080) close tikv/pd#6991 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- tests/integrations/mcs/tso/keyspace_group_manager_test.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tests/integrations/mcs/tso/keyspace_group_manager_test.go b/tests/integrations/mcs/tso/keyspace_group_manager_test.go index 9194811cd37..909972f0315 100644 --- a/tests/integrations/mcs/tso/keyspace_group_manager_test.go +++ b/tests/integrations/mcs/tso/keyspace_group_manager_test.go @@ -781,13 +781,12 @@ func (suite *tsoKeyspaceGroupManagerTestSuite) TestKeyspaceGroupMergeIntoDefault Keyspaces: []uint32{uint32(i)}, }) keyspaces = append(keyspaces, uint32(i)) - if len(keyspaceGroups) < etcdutil.MaxEtcdTxnOps/2 && i != keyspaceGroupNum { + if i != keyspaceGroupNum { continue } handlersutil.MustCreateKeyspaceGroup(re, suite.pdLeaderServer, &handlers.CreateKeyspaceGroupParams{ KeyspaceGroups: keyspaceGroups, }) - keyspaceGroups = keyspaceGroups[:0] } // Check if all the keyspace 
groups are created. groups := handlersutil.MustLoadKeyspaceGroups(re, suite.pdLeaderServer, "0", "0") From bf234b0e4bc4b49f665aea413237d4292393bcfb Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Wed, 17 Apr 2024 17:49:36 +0800 Subject: [PATCH 04/17] tests: reduce duplicate configurations in hot scheduler tests (#8081) ref tikv/pd#8073 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/schedule/schedulers/hot_region_test.go | 69 +------------------ pkg/schedule/schedulers/hot_region_v2_test.go | 22 ------ pkg/schedule/schedulers/scheduler_test.go | 5 +- 3 files changed, 4 insertions(+), 92 deletions(-) diff --git a/pkg/schedule/schedulers/hot_region_test.go b/pkg/schedule/schedulers/hot_region_test.go index cfc5196909f..e5b722a488d 100644 --- a/pkg/schedule/schedulers/hot_region_test.go +++ b/pkg/schedule/schedulers/hot_region_test.go @@ -43,6 +43,9 @@ func init() { // TODO: remove this global variable in the future. // And use a function to create hot schduler for test. schedulePeerPr = 1.0 + // disable denoising in test. + statistics.Denoising = false + statisticsInterval = 0 RegisterScheduler(utils.Write.String(), func(opController *operator.Controller, _ endpoint.ConfigStorage, _ ConfigDecoder, _ ...func(string) error) (Scheduler, error) { cfg := initHotRegionScheduleConfig() return newHotWriteScheduler(opController, cfg), nil @@ -200,10 +203,8 @@ func checkGCPendingOpInfos(re *require.Assertions, enablePlacementRules bool) { func TestSplitIfRegionTooHot(t *testing.T) { re := require.New(t) - statistics.Denoising = false cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - tc.SetHotRegionCacheHitsThreshold(1) hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) b := &metapb.Buckets{ @@ -274,9 +275,7 @@ func TestSplitIfRegionTooHot(t *testing.T) { func TestSplitBucketsBySize(t *testing.T) { re := require.New(t) - statistics.Denoising = false cancel, _, tc, oc := prepareSchedulersTest() - tc.SetHotRegionCacheHitsThreshold(1) tc.SetRegionBucketEnabled(true) defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) @@ -327,9 +326,7 @@ func TestSplitBucketsBySize(t *testing.T) { func TestSplitBucketsByLoad(t *testing.T) { re := require.New(t) - statistics.Denoising = false cancel, _, tc, oc := prepareSchedulersTest() - tc.SetHotRegionCacheHitsThreshold(1) tc.SetRegionBucketEnabled(true) defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil) @@ -388,8 +385,6 @@ func TestSplitBucketsByLoad(t *testing.T) { func TestHotWriteRegionScheduleByteRateOnly(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 checkHotWriteRegionScheduleByteRateOnly(re, false /* disable placement rules */) checkHotWriteRegionScheduleByteRateOnly(re, true /* enable placement rules */) checkHotWriteRegionPlacement(re, true) @@ -406,7 +401,6 @@ func checkHotWriteRegionPlacement(re *require.Assertions, enablePlacementRules b hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddLabelsStore(1, 2, map[string]string{"zone": "z1", "host": "h1"}) tc.AddLabelsStore(2, 2, map[string]string{"zone": "z1", "host": "h2"}) @@ -633,12 +627,9 @@ func checkHotWriteRegionScheduleByteRateOnly(re 
*require.Assertions, enablePlace func TestHotWriteRegionScheduleByteRateOnlyWithTiFlash(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.ConfChangeV2)) - tc.SetHotRegionCacheHitsThreshold(0) re.NoError(tc.RuleManager.SetRules([]*placement.Rule{ { GroupID: placement.DefaultGroupID, @@ -853,8 +844,6 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { }() cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - statistics.Denoising = false - statisticsInterval = 0 hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) re.NoError(err) @@ -863,7 +852,6 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.QueryPriority, utils.BytePriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -888,8 +876,6 @@ func TestHotWriteRegionScheduleWithQuery(t *testing.T) { func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -901,7 +887,6 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { hb.(*hotScheduler).conf.RankFormulaVersion = "v1" hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -948,8 +933,6 @@ func TestHotWriteRegionScheduleWithKeyRate(t *testing.T) { func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -958,7 +941,6 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -996,8 +978,6 @@ func TestHotWriteRegionScheduleUnhealthyStore(t *testing.T) { func TestHotWriteRegionScheduleCheckHot(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -1006,7 +986,6 @@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1031,8 +1010,6 @@ func TestHotWriteRegionScheduleCheckHot(t *testing.T) { func TestHotWriteRegionScheduleWithLeader(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -1041,7 +1018,6 @@ func TestHotWriteRegionScheduleWithLeader(t *testing.T) { hb.(*hotScheduler).conf.SetHistorySampleDuration(0) re.NoError(err) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -1094,8 +1070,6 @@ func 
TestHotWriteRegionScheduleWithLeader(t *testing.T) { func TestHotWriteRegionScheduleWithPendingInfluence(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 checkHotWriteRegionScheduleWithPendingInfluence(re, 0) // 0: byte rate checkHotWriteRegionScheduleWithPendingInfluence(re, 1) // 1: key rate } @@ -1114,7 +1088,6 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim pendingAmpFactor = old }() - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1189,8 +1162,6 @@ func checkHotWriteRegionScheduleWithPendingInfluence(re *require.Assertions, dim func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() tc.SetEnablePlacementRules(true) @@ -1199,7 +1170,6 @@ func TestHotWriteRegionScheduleWithRuleEnabled(t *testing.T) { hb.(*hotScheduler).conf.WriteLeaderPriorities = []string{utils.KeyPriority, utils.BytePriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) key, err := hex.DecodeString("") re.NoError(err) // skip stddev check @@ -1282,7 +1252,6 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { hb := scheduler.(*hotScheduler) hb.conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} hb.conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) // Add stores 1, 2, 3, 4, 5 with region counts 3, 2, 2, 2, 0. tc.AddRegionStore(1, 3) @@ -1396,8 +1365,6 @@ func TestHotReadRegionScheduleByteRateOnly(t *testing.T) { func TestHotReadRegionScheduleWithQuery(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -1408,7 +1375,6 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { hb.(*hotScheduler).conf.RankFormulaVersion = "v1" hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -1432,8 +1398,6 @@ func TestHotReadRegionScheduleWithQuery(t *testing.T) { func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -1445,7 +1409,6 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -1491,8 +1454,6 @@ func TestHotReadRegionScheduleWithKeyRate(t *testing.T) { func TestHotReadRegionScheduleWithPendingInfluence(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 checkHotReadRegionScheduleWithPendingInfluence(re, 0) // 0: byte rate checkHotReadRegionScheduleWithPendingInfluence(re, 1) // 1: key rate } @@ -1515,7 +1476,6 @@ func checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim pendingAmpFactor = old }() - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1614,8 +1574,6 @@ func 
checkHotReadRegionScheduleWithPendingInfluence(re *require.Assertions, dim func TestHotReadWithEvictLeaderScheduler(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -1625,7 +1583,6 @@ func TestHotReadWithEvictLeaderScheduler(t *testing.T) { hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetStrictPickingStore(false) hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -1657,7 +1614,6 @@ func TestHotCacheUpdateCache(t *testing.T) { re := require.New(t) cancel, _, tc, _ := prepareSchedulersTest() defer cancel() - tc.SetHotRegionCacheHitsThreshold(0) // For read flow addRegionInfo(tc, utils.Read, []testRegionInfo{ @@ -1724,7 +1680,6 @@ func TestHotCacheKeyThresholds(t *testing.T) { { // only a few regions cancel, _, tc, _ := prepareSchedulersTest() defer cancel() - tc.SetHotRegionCacheHitsThreshold(0) addRegionInfo(tc, utils.Read, []testRegionInfo{ {1, []uint64{1, 2, 3}, 0, 1, 0}, {2, []uint64{1, 2, 3}, 0, 1 * units.KiB, 0}, @@ -1796,7 +1751,6 @@ func TestHotCacheByteAndKey(t *testing.T) { re := require.New(t) cancel, _, tc, _ := prepareSchedulersTest() defer cancel() - tc.SetHotRegionCacheHitsThreshold(0) statistics.ThresholdsUpdateInterval = 0 defer func() { statistics.ThresholdsUpdateInterval = 8 * time.Second @@ -2090,8 +2044,6 @@ func TestInfluenceByRWType(t *testing.T) { defer func() { schedulePeerPr = originValue }() - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -2100,7 +2052,6 @@ func TestInfluenceByRWType(t *testing.T) { hb.(*hotScheduler).conf.SetDstToleranceRatio(1) hb.(*hotScheduler).conf.SetSrcToleranceRatio(1) hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -2214,8 +2165,6 @@ func checkHotReadPeerSchedule(re *require.Assertions, enablePlacementRules bool) func TestHotScheduleWithPriority(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 cancel, _, tc, oc := prepareSchedulersTest() defer cancel() @@ -2231,7 +2180,6 @@ func TestHotScheduleWithPriority(t *testing.T) { stddevThreshold = origin }() - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) @@ -2323,9 +2271,6 @@ func TestHotScheduleWithPriority(t *testing.T) { func TestHotScheduleWithStddev(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) @@ -2334,7 +2279,6 @@ func TestHotScheduleWithStddev(t *testing.T) { hb.(*hotScheduler).conf.SetSrcToleranceRatio(1.0) hb.(*hotScheduler).conf.RankFormulaVersion = "v1" tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -2384,9 +2328,6 @@ func TestHotScheduleWithStddev(t *testing.T) { func 
TestHotWriteLeaderScheduleWithPriority(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) @@ -2396,7 +2337,6 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { hb.(*hotScheduler).conf.SetHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -2428,9 +2368,6 @@ func TestHotWriteLeaderScheduleWithPriority(t *testing.T) { func TestCompatibility(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() hb, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil) diff --git a/pkg/schedule/schedulers/hot_region_v2_test.go b/pkg/schedule/schedulers/hot_region_v2_test.go index 78a30cebaca..25d6d94f7b1 100644 --- a/pkg/schedule/schedulers/hot_region_v2_test.go +++ b/pkg/schedule/schedulers/hot_region_v2_test.go @@ -21,7 +21,6 @@ import ( "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/mock/mockcluster" "github.com/tikv/pd/pkg/schedule/operator" - "github.com/tikv/pd/pkg/statistics" "github.com/tikv/pd/pkg/statistics/utils" "github.com/tikv/pd/pkg/storage" "github.com/tikv/pd/pkg/utils/operatorutil" @@ -33,8 +32,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - statistics.Denoising = false - statisticsInterval = 0 sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) @@ -43,7 +40,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { hb.conf.SetRankFormulaVersion("v1") hb.conf.SetHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -96,8 +92,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { re := require.New(t) cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - statistics.Denoising = false - sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) @@ -106,7 +100,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { hb.conf.SetRankFormulaVersion("v1") hb.conf.SetHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -148,9 +141,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirst(t *testing.T) { func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { // This is a test that searchRevertRegions finds a solution of rank -2. 
re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() sche, err := CreateScheduler(utils.Write.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) @@ -161,7 +151,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { hb.conf.SetRankFormulaVersion("v1") hb.conf.SetHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -212,9 +201,6 @@ func TestHotWriteRegionScheduleWithRevertRegionsDimFirstOnly(t *testing.T) { func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { // This is a test that searchRevertRegions finds a solution of rank -1. re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) @@ -225,7 +211,6 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { hb.conf.SetRankFormulaVersion("v1") hb.conf.SetHistorySampleDuration(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -275,9 +260,6 @@ func TestHotReadRegionScheduleWithRevertRegionsDimSecond(t *testing.T) { func TestSkipUniformStore(t *testing.T) { re := require.New(t) - statistics.Denoising = false - statisticsInterval = 0 - cancel, _, tc, oc := prepareSchedulersTest() defer cancel() hb, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) @@ -287,7 +269,6 @@ func TestSkipUniformStore(t *testing.T) { hb.(*hotScheduler).conf.SetRankFormulaVersion("v2") hb.(*hotScheduler).conf.ReadPriorities = []string{utils.BytePriority, utils.KeyPriority} hb.(*hotScheduler).conf.SetHistorySampleDuration(0) - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 20) tc.AddRegionStore(2, 20) tc.AddRegionStore(3, 20) @@ -439,7 +420,6 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo addOtherRegions func(*mockcluster.Cluster, *hotScheduler)) []*operator.Operator { cancel, _, tc, oc := prepareSchedulersTest() defer cancel() - statistics.Denoising = false sche, err := CreateScheduler(utils.Read.String(), oc, storage.NewStorageWithMemoryBackend(), nil, nil) re.NoError(err) hb := sche.(*hotScheduler) @@ -447,7 +427,6 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo hb.conf.SetDstToleranceRatio(1) hb.conf.SetRankFormulaVersion("v2") hb.conf.ReadPriorities = []string{utils.QueryPriority, utils.BytePriority} - tc.SetHotRegionCacheHitsThreshold(0) tc.AddRegionStore(1, 40) tc.AddRegionStore(2, 10) tc.AddRegionStore(3, 10) @@ -470,7 +449,6 @@ func checkHotReadRegionScheduleWithSmallHotRegion(re *require.Assertions, highLo } } addRegionInfo(tc, utils.Read, regions) - tc.SetHotRegionCacheHitsThreshold(1) addOtherRegions(tc, hb) ops, _ := hb.Schedule(tc, false) return ops diff --git a/pkg/schedule/schedulers/scheduler_test.go b/pkg/schedule/schedulers/scheduler_test.go index 77c190ad943..d30ef3ad0aa 100644 --- a/pkg/schedule/schedulers/scheduler_test.go +++ b/pkg/schedule/schedulers/scheduler_test.go @@ -46,6 +46,7 @@ func prepareSchedulersTest(needToRunStream ...bool) (context.CancelFunc, config. 
stream = hbstream.NewTestHeartbeatStreams(ctx, tc.ID, tc, needToRunStream[0]) } oc := operator.NewController(ctx, tc.GetBasicCluster(), tc.GetSchedulerConfig(), stream) + tc.SetHotRegionCacheHitsThreshold(1) return cancel, opt, tc, oc } @@ -183,7 +184,6 @@ func checkBalance(re *require.Assertions, enablePlacementRules bool) { tc.AddLeaderRegionWithWriteInfo(1, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 3}) tc.AddLeaderRegionWithWriteInfo(2, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{3, 4}) tc.AddLeaderRegionWithWriteInfo(3, 1, 512*units.KiB*utils.RegionHeartBeatReportInterval, 0, 0, utils.RegionHeartBeatReportInterval, []uint64{2, 4}) - tc.SetHotRegionCacheHitsThreshold(0) // try to get an operator var ops []*operator.Operator @@ -218,7 +218,6 @@ func TestHotRegionScheduleAbnormalReplica(t *testing.T) { tc.AddRegionWithReadInfo(1, 1, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{2}) tc.AddRegionWithReadInfo(2, 2, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{1, 3}) tc.AddRegionWithReadInfo(3, 1, 512*units.KiB*utils.StoreHeartBeatReportInterval, 0, 0, utils.StoreHeartBeatReportInterval, []uint64{2, 3}) - tc.SetHotRegionCacheHitsThreshold(0) re.True(tc.IsRegionHot(tc.GetRegion(1))) re.False(hb.IsScheduleAllowed(tc)) } @@ -318,7 +317,6 @@ func TestSpecialUseHotRegion(t *testing.T) { hs, err := CreateScheduler(utils.Write.String(), oc, storage, cd) re.NoError(err) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 10) tc.AddRegionStore(2, 4) @@ -368,7 +366,6 @@ func TestSpecialUseReserved(t *testing.T) { bs, err := CreateScheduler(BalanceRegionType, oc, storage, cd) re.NoError(err) - tc.SetHotRegionCacheHitsThreshold(0) tc.SetClusterVersion(versioninfo.MinSupportedVersion(versioninfo.Version4_0)) tc.AddRegionStore(1, 10) tc.AddRegionStore(2, 4) From db88a43f7783e8768730f8ab0ef6056e9d2eff04 Mon Sep 17 00:00:00 2001 From: JmPotato Date: Wed, 17 Apr 2024 22:04:37 +0800 Subject: [PATCH 05/17] client/tso: use the TSO request pool at the tsoClient level to avoid data race (#8077) close tikv/pd#8055, ref tikv/pd#8076 Use the TSO request pool at the `tsoClient` level to avoid data race. Signed-off-by: JmPotato Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- client/client.go | 33 ++++++-------- client/tso_client.go | 61 ++++++++++++------------- client/tso_dispatcher.go | 32 -------------- client/tso_request.go | 96 ++++++++++++++++++++++++++++++++++++++++ 4 files changed, 139 insertions(+), 83 deletions(-) create mode 100644 client/tso_request.go diff --git a/client/client.go b/client/client.go index 1852b77e4c6..eaebef7e10c 100644 --- a/client/client.go +++ b/client/client.go @@ -798,23 +798,7 @@ func (c *client) GetLocalTSAsync(ctx context.Context, dcLocation string) TSFutur defer span.Finish() } - req := c.getTSORequest(ctx, dcLocation) - if err := c.dispatchTSORequestWithRetry(req); err != nil { - req.tryDone(err) - } - return req -} - -func (c *client) getTSORequest(ctx context.Context, dcLocation string) *tsoRequest { - req := tsoReqPool.Get().(*tsoRequest) - // Set needed fields in the request before using it. 
- req.start = time.Now() - req.clientCtx = c.ctx - req.requestCtx = ctx - req.physical = 0 - req.logical = 0 - req.dcLocation = dcLocation - return req + return c.dispatchTSORequestWithRetry(ctx, dcLocation) } const ( @@ -822,10 +806,11 @@ const ( dispatchRetryCount = 2 ) -func (c *client) dispatchTSORequestWithRetry(req *tsoRequest) error { +func (c *client) dispatchTSORequestWithRetry(ctx context.Context, dcLocation string) TSFuture { var ( retryable bool err error + req *tsoRequest ) for i := 0; i < dispatchRetryCount; i++ { // Do not delay for the first time. @@ -838,12 +823,22 @@ func (c *client) dispatchTSORequestWithRetry(req *tsoRequest) error { err = errs.ErrClientGetTSO.FastGenByArgs("tso client is nil") continue } + // Get a new request from the pool if it's nil or not from the current pool. + if req == nil || req.pool != tsoClient.tsoReqPool { + req = tsoClient.getTSORequest(ctx, dcLocation) + } retryable, err = tsoClient.dispatchRequest(req) if !retryable { break } } - return err + if err != nil { + if req == nil { + return newTSORequestFastFail(err) + } + req.tryDone(err) + } + return req } func (c *client) GetTS(ctx context.Context) (physical int64, logical int64, err error) { diff --git a/client/tso_client.go b/client/tso_client.go index 5f8b12df36f..8185b99d1d0 100644 --- a/client/tso_client.go +++ b/client/tso_client.go @@ -43,33 +43,6 @@ type TSOClient interface { GetMinTS(ctx context.Context) (int64, int64, error) } -type tsoRequest struct { - start time.Time - clientCtx context.Context - requestCtx context.Context - done chan error - physical int64 - logical int64 - dcLocation string -} - -var tsoReqPool = sync.Pool{ - New: func() any { - return &tsoRequest{ - done: make(chan error, 1), - physical: 0, - logical: 0, - } - }, -} - -func (req *tsoRequest) tryDone(err error) { - select { - case req.done <- err: - default: - } -} - type tsoClient struct { ctx context.Context cancel context.CancelFunc @@ -84,6 +57,8 @@ type tsoClient struct { // tso allocator leader is switched. tsoAllocServingURLSwitchedCallback []func() + // tsoReqPool is the pool to recycle `*tsoRequest`. + tsoReqPool *sync.Pool // tsoDispatcher is used to dispatch different TSO requests to // the corresponding dc-location TSO channel. tsoDispatcher sync.Map // Same as map[string]*tsoDispatcher @@ -104,11 +79,20 @@ func newTSOClient( ) *tsoClient { ctx, cancel := context.WithCancel(ctx) c := &tsoClient{ - ctx: ctx, - cancel: cancel, - option: option, - svcDiscovery: svcDiscovery, - tsoStreamBuilderFactory: factory, + ctx: ctx, + cancel: cancel, + option: option, + svcDiscovery: svcDiscovery, + tsoStreamBuilderFactory: factory, + tsoReqPool: &sync.Pool{ + New: func() any { + return &tsoRequest{ + done: make(chan error, 1), + physical: 0, + logical: 0, + } + }, + }, checkTSDeadlineCh: make(chan struct{}), checkTSODispatcherCh: make(chan struct{}, 1), updateTSOConnectionCtxsCh: make(chan struct{}, 1), @@ -155,6 +139,19 @@ func (c *tsoClient) Close() { log.Info("tso client is closed") } +func (c *tsoClient) getTSORequest(ctx context.Context, dcLocation string) *tsoRequest { + req := c.tsoReqPool.Get().(*tsoRequest) + // Set needed fields in the request before using it. 
+ req.start = time.Now() + req.pool = c.tsoReqPool + req.requestCtx = ctx + req.clientCtx = c.ctx + req.physical = 0 + req.logical = 0 + req.dcLocation = dcLocation + return req +} + // GetTSOAllocators returns {dc-location -> TSO allocator leader URL} connection map func (c *tsoClient) GetTSOAllocators() *sync.Map { return &c.tsoAllocators diff --git a/client/tso_dispatcher.go b/client/tso_dispatcher.go index ad3aa1c5d74..d02fdd52af8 100644 --- a/client/tso_dispatcher.go +++ b/client/tso_dispatcher.go @@ -115,38 +115,6 @@ func (c *tsoClient) dispatchRequest(request *tsoRequest) (bool, error) { return false, nil } -// TSFuture is a future which promises to return a TSO. -type TSFuture interface { - // Wait gets the physical and logical time, it would block caller if data is not available yet. - Wait() (int64, int64, error) -} - -func (req *tsoRequest) Wait() (physical int64, logical int64, err error) { - // If tso command duration is observed very high, the reason could be it - // takes too long for Wait() be called. - start := time.Now() - cmdDurationTSOAsyncWait.Observe(start.Sub(req.start).Seconds()) - select { - case err = <-req.done: - defer trace.StartRegion(req.requestCtx, "pdclient.tsoReqDone").End() - err = errors.WithStack(err) - defer tsoReqPool.Put(req) - if err != nil { - cmdFailDurationTSO.Observe(time.Since(req.start).Seconds()) - return 0, 0, err - } - physical, logical = req.physical, req.logical - now := time.Now() - cmdDurationWait.Observe(now.Sub(start).Seconds()) - cmdDurationTSO.Observe(now.Sub(req.start).Seconds()) - return - case <-req.requestCtx.Done(): - return 0, 0, errors.WithStack(req.requestCtx.Err()) - case <-req.clientCtx.Done(): - return 0, 0, errors.WithStack(req.clientCtx.Err()) - } -} - func (c *tsoClient) updateTSODispatcher() { // Set up the new TSO dispatcher and batch controller. c.GetTSOAllocators().Range(func(dcLocationKey, _ any) bool { diff --git a/client/tso_request.go b/client/tso_request.go new file mode 100644 index 00000000000..f30ceb5268a --- /dev/null +++ b/client/tso_request.go @@ -0,0 +1,96 @@ +// Copyright 2024 TiKV Project Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pd + +import ( + "context" + "runtime/trace" + "sync" + "time" + + "github.com/pingcap/errors" +) + +// TSFuture is a future which promises to return a TSO. +type TSFuture interface { + // Wait gets the physical and logical time, it would block caller if data is not available yet. + Wait() (int64, int64, error) +} + +var ( + _ TSFuture = (*tsoRequest)(nil) + _ TSFuture = (*tsoRequestFastFail)(nil) +) + +type tsoRequest struct { + requestCtx context.Context + clientCtx context.Context + done chan error + physical int64 + logical int64 + dcLocation string + + // Runtime fields. + start time.Time + pool *sync.Pool +} + +// tryDone tries to send the result to the channel, it will not block. +func (req *tsoRequest) tryDone(err error) { + select { + case req.done <- err: + default: + } +} + +// Wait will block until the TSO result is ready. 
+func (req *tsoRequest) Wait() (physical int64, logical int64, err error) { + // If tso command duration is observed very high, the reason could be it + // takes too long for Wait() be called. + start := time.Now() + cmdDurationTSOAsyncWait.Observe(start.Sub(req.start).Seconds()) + select { + case err = <-req.done: + defer trace.StartRegion(req.requestCtx, "pdclient.tsoReqDone").End() + defer req.pool.Put(req) + err = errors.WithStack(err) + if err != nil { + cmdFailDurationTSO.Observe(time.Since(req.start).Seconds()) + return 0, 0, err + } + physical, logical = req.physical, req.logical + now := time.Now() + cmdDurationWait.Observe(now.Sub(start).Seconds()) + cmdDurationTSO.Observe(now.Sub(req.start).Seconds()) + return + case <-req.requestCtx.Done(): + return 0, 0, errors.WithStack(req.requestCtx.Err()) + case <-req.clientCtx.Done(): + return 0, 0, errors.WithStack(req.clientCtx.Err()) + } +} + +type tsoRequestFastFail struct { + err error +} + +func newTSORequestFastFail(err error) *tsoRequestFastFail { + return &tsoRequestFastFail{err} +} + +// Wait returns the error directly. +func (req *tsoRequestFastFail) Wait() (physical int64, logical int64, err error) { + return 0, 0, req.err +} From afdd48f3026776eaa7cbf8172e824b05a0a332fa Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 17 Apr 2024 22:21:07 +0800 Subject: [PATCH 06/17] *: make `TestEtcdClientSync` stable (#8084) close tikv/pd#8085 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/utils/etcdutil/etcdutil_test.go | 19 ++++++++++++++++--- 1 file changed, 16 insertions(+), 3 deletions(-) diff --git a/pkg/utils/etcdutil/etcdutil_test.go b/pkg/utils/etcdutil/etcdutil_test.go index 4c1e20fa73c..c402081fa2f 100644 --- a/pkg/utils/etcdutil/etcdutil_test.go +++ b/pkg/utils/etcdutil/etcdutil_test.go @@ -172,6 +172,7 @@ func TestEtcdClientSync(t *testing.T) { servers, client1, clean := NewTestEtcdCluster(t, 1) defer clean() etcd1, cfg1 := servers[0], servers[0].Config() + defer etcd1.Close() // Add a new member. etcd2 := MustAddEtcdMember(t, &cfg1, client1) @@ -180,10 +181,22 @@ func TestEtcdClientSync(t *testing.T) { // wait for etcd client sync endpoints checkEtcdEndpointNum(re, client1, 2) - // Remove the first member and close the etcd1. - _, err := RemoveEtcdMember(client1, uint64(etcd1.Server.ID())) + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + // remove one member that is not the one we connected to. + resp, err := ListEtcdMembers(ctx, client1) + re.NoError(err) + + var memIDToRemove uint64 + for _, m := range resp.Members { + if m.ID != resp.Header.MemberId { + memIDToRemove = m.ID + break + } + } + + _, err = RemoveEtcdMember(client1, memIDToRemove) re.NoError(err) - etcd1.Close() // Check the client can get the new member with the new endpoints. 
checkEtcdEndpointNum(re, client1, 1) From 956684a0015e63b904b2ff6c1c6186f161f6de4b Mon Sep 17 00:00:00 2001 From: Shirly Date: Thu, 18 Apr 2024 10:12:06 +0800 Subject: [PATCH 07/17] cluster: print down peer when the related store is not disconnected (#8046) ref tikv/pd#4399 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- pkg/statistics/region_collection.go | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/pkg/statistics/region_collection.go b/pkg/statistics/region_collection.go index 488763142e1..cb0de6f601b 100644 --- a/pkg/statistics/region_collection.go +++ b/pkg/statistics/region_collection.go @@ -22,6 +22,7 @@ import ( sc "github.com/tikv/pd/pkg/schedule/config" "github.com/tikv/pd/pkg/schedule/placement" "github.com/tikv/pd/pkg/utils/syncutil" + "go.uber.org/zap" ) // RegionInfoProvider is an interface to provide the region information. @@ -250,6 +251,7 @@ func (r *RegionStatistics) Observe(region *core.RegionInfo, stores []*core.Store regionDownPeerDuration.Observe(float64(time.Now().Unix() - info.startDownPeerTS)) } else { info.startDownPeerTS = time.Now().Unix() + logDownPeerWithNoDisconnectedStore(region, stores) } } else if typ == MissPeer && len(region.GetVoters()) < desiredVoters { if info.startMissVoterPeerTS != 0 { @@ -440,3 +442,24 @@ func notIsolatedStoresWithLabel(stores []*core.StoreInfo, label string) [][]*cor } return res } + +// logDownPeerWithNoDisconnectedStore logs down peers on connected stores. +// It won't log down peer when any store of the replica is disconnected which is +// used to avoid too many logs when a store is disconnected. +// TODO: it's not a good way to log down peer during process region heartbeat, we should handle it in another way. +// region: the region which has down peer +// stores: all stores that the region has peer on them +func logDownPeerWithNoDisconnectedStore(region *core.RegionInfo, stores []*core.StoreInfo) { + for _, store := range stores { + if store.IsDisconnected() { + return + } + } + for _, p := range region.GetDownPeers() { + log.Warn("region has down peer on connected store", + zap.Uint64("region-id", region.GetID()), + zap.Uint64("down-peer", p.GetPeer().GetId()), + zap.Uint64("down-seconds", p.GetDownSeconds()), + zap.Uint64("store-id", p.GetPeer().GetStoreId())) + } +} From ad5ba18205eb0ef2d3b4c609ed5a8a2d786dc0c4 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 18 Apr 2024 11:45:07 +0800 Subject: [PATCH 08/17] *: make `TestConcurrentlyReset` stable (#8082) close tikv/pd#6898 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/api/admin_test.go | 2 +- tests/integrations/tso/server_test.go | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/server/api/admin_test.go b/server/api/admin_test.go index 3a628a1de61..f3b3dd64bd3 100644 --- a/server/api/admin_test.go +++ b/server/api/admin_test.go @@ -181,7 +181,7 @@ func (suite *adminTestSuite) TestPersistFile() { func makeTS(offset time.Duration) uint64 { physical := time.Now().Add(offset).UnixNano() / int64(time.Millisecond) - return uint64(physical << 18) + return uint64(physical) << 18 } func (suite *adminTestSuite) TestResetTS() { diff --git a/tests/integrations/tso/server_test.go b/tests/integrations/tso/server_test.go index ac3d914aa80..5590ba68d37 100644 --- a/tests/integrations/tso/server_test.go +++ b/tests/integrations/tso/server_test.go @@ -152,12 +152,12 @@ func (suite *tsoServerTestSuite) 
TestConcurrentlyReset() { for i := 0; i < 2; i++ { go func() { defer wg.Done() - for j := 0; j <= 100; j++ { + for j := 0; j <= 50; j++ { // Get a copy of now then call base.add, because now is shared by all goroutines // and now.add() will add to itself which isn't atomic and multi-goroutine safe. base := now - physical := base.Add(time.Duration(2*j)*time.Minute).UnixNano() / int64(time.Millisecond) - ts := uint64(physical << 18) + physical := base.Add(time.Duration(j)*time.Minute).UnixNano() / int64(time.Millisecond) + ts := uint64(physical) << 18 suite.resetTS(ts, false, false) } }() From d64e6981eda4c93e38807cc77a1af5e7e203f132 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 18 Apr 2024 14:06:07 +0800 Subject: [PATCH 09/17] *: make `TestSplitKeyspaceGroup` stable (#8088) close tikv/pd#7380 Signed-off-by: Ryan Leung --- tests/server/apiv2/handlers/testutil.go | 42 ++++++++++++++++++------- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/tests/server/apiv2/handlers/testutil.go b/tests/server/apiv2/handlers/testutil.go index d26ce732714..c5682aafbce 100644 --- a/tests/server/apiv2/handlers/testutil.go +++ b/tests/server/apiv2/handlers/testutil.go @@ -24,6 +24,7 @@ import ( "github.com/pingcap/kvproto/pkg/keyspacepb" "github.com/stretchr/testify/require" "github.com/tikv/pd/pkg/storage/endpoint" + "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/server/apiv2/handlers" "github.com/tikv/pd/tests" ) @@ -168,8 +169,14 @@ func tryCreateKeyspaceGroup(re *require.Assertions, server *tests.TestServer, re // MustLoadKeyspaceGroupByID loads the keyspace group by ID with HTTP API. func MustLoadKeyspaceGroupByID(re *require.Assertions, server *tests.TestServer, id uint32) *endpoint.KeyspaceGroup { - kg, code := TryLoadKeyspaceGroupByID(re, server, id) - re.Equal(http.StatusOK, code) + var ( + kg *endpoint.KeyspaceGroup + code int + ) + testutil.Eventually(re, func() bool { + kg, code = TryLoadKeyspaceGroupByID(re, server, id) + return code == http.StatusOK + }) return kg } @@ -232,15 +239,28 @@ func MustSplitKeyspaceGroup(re *require.Assertions, server *tests.TestServer, id // MustFinishSplitKeyspaceGroup finishes a keyspace group split with HTTP API. func MustFinishSplitKeyspaceGroup(re *require.Assertions, server *tests.TestServer, id uint32) { - httpReq, err := http.NewRequest(http.MethodDelete, server.GetAddr()+keyspaceGroupsPrefix+fmt.Sprintf("/%d/split", id), http.NoBody) - re.NoError(err) - // Send request. - resp, err := dialClient.Do(httpReq) - re.NoError(err) - defer resp.Body.Close() - data, err := io.ReadAll(resp.Body) - re.NoError(err) - re.Equal(http.StatusOK, resp.StatusCode, string(data)) + testutil.Eventually(re, func() bool { + httpReq, err := http.NewRequest(http.MethodDelete, server.GetAddr()+keyspaceGroupsPrefix+fmt.Sprintf("/%d/split", id), http.NoBody) + if err != nil { + return false + } + // Send request. + resp, err := dialClient.Do(httpReq) + if err != nil { + return false + } + defer resp.Body.Close() + data, err := io.ReadAll(resp.Body) + if err != nil { + return false + } + if resp.StatusCode == http.StatusServiceUnavailable || + resp.StatusCode == http.StatusInternalServerError { + return false + } + re.Equal(http.StatusOK, resp.StatusCode, string(data)) + return true + }) } // MustMergeKeyspaceGroup merges keyspace groups with HTTP API. 
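The stabilization in PATCH 09/17 above boils down to a poll-until-OK pattern: instead of asserting on the first HTTP response, the test keeps reissuing the request while the server answers with a transient status (503 or 500). The Go sketch below is a minimal, self-contained illustration of that pattern only; it is not part of the PD code base. The helper name retryUntilOK, the health-check URL, and the timeout/interval values are illustrative assumptions — the actual tests use pkg/utils/testutil.Eventually as shown in the diff.

// retry_http_sketch.go
//
// Minimal sketch (assumed names, not PD code) of retrying an HTTP request
// until the server stops returning a transient status.
package main

import (
	"fmt"
	"net/http"
	"time"
)

// retryUntilOK polls url until it returns 200 OK or the timeout expires.
// Transport errors and transient statuses (503, 500) are retried; any other
// status is treated as a hard failure.
func retryUntilOK(client *http.Client, url string, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		resp, err := client.Get(url)
		if err == nil {
			status := resp.StatusCode
			resp.Body.Close()
			if status == http.StatusOK {
				return nil
			}
			if status != http.StatusServiceUnavailable && status != http.StatusInternalServerError {
				return fmt.Errorf("unexpected status %d from %s", status, url)
			}
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for %s", url)
		}
		time.Sleep(interval)
	}
}

func main() {
	// Example usage against a hypothetical endpoint; URL and timings are placeholders.
	err := retryUntilOK(http.DefaultClient, "http://127.0.0.1:2379/health", 10*time.Second, 200*time.Millisecond)
	fmt.Println("result:", err)
}

The design choice mirrors the patch: retries stay bounded by a deadline so a genuinely broken endpoint still fails the test, while transient unavailability during leader changes or keyspace-group splits no longer causes flakes.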
From 882d2e55951f7eba3f888a91872b08fea395e3e0 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Thu, 18 Apr 2024 15:58:37 +0800 Subject: [PATCH 10/17] *: make `TestRateLimitConfigReload` stable (#8092) close tikv/pd#8083 Signed-off-by: Ryan Leung --- tests/server/config/config_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/tests/server/config/config_test.go b/tests/server/config/config_test.go index b6fcecbd47b..57e4272f7ea 100644 --- a/tests/server/config/config_test.go +++ b/tests/server/config/config_test.go @@ -32,7 +32,6 @@ import ( tu "github.com/tikv/pd/pkg/utils/testutil" "github.com/tikv/pd/pkg/utils/typeutil" "github.com/tikv/pd/pkg/versioninfo" - "github.com/tikv/pd/server" "github.com/tikv/pd/server/config" "github.com/tikv/pd/tests" ) @@ -74,11 +73,7 @@ func TestRateLimitConfigReload(t *testing.T) { oldLeaderName := leader.GetServer().Name() leader.GetServer().GetMember().ResignEtcdLeader(leader.GetServer().Context(), oldLeaderName, "") - var servers []*server.Server - for _, s := range cluster.GetServers() { - servers = append(servers, s.GetServer()) - } - server.MustWaitLeader(re, servers) + re.NotEmpty(cluster.WaitLeader()) leader = cluster.GetLeaderServer() re.NotNil(leader) re.True(leader.GetServer().GetServiceMiddlewarePersistOptions().IsRateLimitEnabled()) From 5d3c8c105626ee86956f1c24d3b8750243a50961 Mon Sep 17 00:00:00 2001 From: ShuNing Date: Fri, 19 Apr 2024 12:12:06 +0800 Subject: [PATCH 11/17] schedule: fix the filter metrics flush (#8097) close tikv/pd#8098 schedule: fix the filter metrics flush Signed-off-by: nolouch --- pkg/schedule/schedulers/balance_leader.go | 2 +- pkg/schedule/schedulers/balance_region.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index b4e6feb332c..6976bc4d8ea 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -354,6 +354,7 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun if dryRun { collector = plan.NewCollector(basePlan) } + defer l.filterCounter.Flush() batch := l.conf.getBatch() balanceLeaderScheduleCounter.Inc() @@ -395,7 +396,6 @@ func (l *balanceLeaderScheduler) Schedule(cluster sche.SchedulerCluster, dryRun } } } - l.filterCounter.Flush() l.retryQuota.GC(append(sourceCandidate.stores, targetCandidate.stores...)) return result, collector.GetPlans() } diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index 98e3be6e08a..e4202c133ad 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -114,6 +114,7 @@ func (s *balanceRegionScheduler) IsScheduleAllowed(cluster sche.SchedulerCluster func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun bool) ([]*operator.Operator, []plan.Plan) { basePlan := plan.NewBalanceSchedulerPlan() + defer s.filterCounter.Flush() var collector *plan.Collector if dryRun { collector = plan.NewCollector(basePlan) @@ -217,7 +218,6 @@ func (s *balanceRegionScheduler) Schedule(cluster sche.SchedulerCluster, dryRun } s.retryQuota.Attenuate(solver.Source) } - s.filterCounter.Flush() s.retryQuota.GC(stores) return nil, collector.GetPlans() } From 32bf572de6b55cbcf6556021f8ad0d4485247240 Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 19 Apr 2024 16:58:37 +0800 Subject: [PATCH 12/17] tests/etcd: remove mod to make test stable (#8102) ref tikv/pd#7969, close tikv/pd#8091 
Signed-off-by: husharp --- pkg/utils/etcdutil/testutil.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/utils/etcdutil/testutil.go b/pkg/utils/etcdutil/testutil.go index d9464eeceeb..13c10260a40 100644 --- a/pkg/utils/etcdutil/testutil.go +++ b/pkg/utils/etcdutil/testutil.go @@ -51,7 +51,7 @@ func NewTestSingleConfig() *embed.Config { } func genRandName() string { - return "test_etcd_" + strconv.FormatInt(time.Now().UnixNano()%10000, 10) + return "test_etcd_" + strconv.FormatInt(time.Now().UnixNano(), 10) } // NewTestEtcdCluster is used to create a etcd cluster for the unit test purpose. From 4a7bffcb3baf7a8e2515e70834d8567bc17439da Mon Sep 17 00:00:00 2001 From: Hu# Date: Fri, 19 Apr 2024 19:25:37 +0800 Subject: [PATCH 13/17] operator: make additional information thread safe (#8104) close tikv/pd#8079 Signed-off-by: husharp --- pkg/schedule/operator/create_operator.go | 4 +- pkg/schedule/operator/operator.go | 46 ++++++++----------- pkg/schedule/operator/operator_controller.go | 14 +++--- pkg/schedule/operator/status_tracker.go | 33 +++++++++++++ pkg/schedule/operator/status_tracker_test.go | 25 ++++++++++ pkg/schedule/scatter/region_scatterer.go | 4 +- pkg/schedule/scatter/region_scatterer_test.go | 2 +- pkg/schedule/schedulers/balance_leader.go | 4 +- pkg/schedule/schedulers/balance_region.go | 4 +- pkg/schedule/schedulers/balance_witness.go | 4 +- pkg/schedule/schedulers/hot_region.go | 4 +- pkg/schedule/schedulers/split_bucket.go | 2 +- 12 files changed, 98 insertions(+), 48 deletions(-) diff --git a/pkg/schedule/operator/create_operator.go b/pkg/schedule/operator/create_operator.go index 638230e3097..64680520933 100644 --- a/pkg/schedule/operator/create_operator.go +++ b/pkg/schedule/operator/create_operator.go @@ -170,8 +170,8 @@ func CreateSplitRegionOperator(desc string, region *core.RegionInfo, kind OpKind brief += fmt.Sprintf(" and keys %v", hexKeys) } op := NewOperator(desc, brief, region.GetID(), region.GetRegionEpoch(), kind|OpSplit, region.GetApproximateSize(), step) - op.AdditionalInfos["region-start-key"] = core.HexRegionKeyStr(logutil.RedactBytes(region.GetStartKey())) - op.AdditionalInfos["region-end-key"] = core.HexRegionKeyStr(logutil.RedactBytes(region.GetEndKey())) + op.SetAdditionalInfo("region-start-key", core.HexRegionKeyStr(logutil.RedactBytes(region.GetStartKey()))) + op.SetAdditionalInfo("region-end-key", core.HexRegionKeyStr(logutil.RedactBytes(region.GetEndKey()))) return op, nil } diff --git a/pkg/schedule/operator/operator.go b/pkg/schedule/operator/operator.go index b87a050969f..de197c4fba4 100644 --- a/pkg/schedule/operator/operator.go +++ b/pkg/schedule/operator/operator.go @@ -15,7 +15,6 @@ package operator import ( - "encoding/json" "fmt" "reflect" "strconv" @@ -83,7 +82,7 @@ type Operator struct { level constant.PriorityLevel Counters []prometheus.Counter FinishedCounters []prometheus.Counter - AdditionalInfos map[string]string + additionalInfos opAdditionalInfo ApproximateSize int64 timeout time.Duration influence *OpInfluence @@ -100,16 +99,18 @@ func NewOperator(desc, brief string, regionID uint64, regionEpoch *metapb.Region maxDuration += v.Timeout(approximateSize).Seconds() } return &Operator{ - desc: desc, - brief: brief, - regionID: regionID, - regionEpoch: regionEpoch, - kind: kind, - steps: steps, - stepsTime: make([]int64, len(steps)), - status: NewOpStatusTracker(), - level: level, - AdditionalInfos: make(map[string]string), + desc: desc, + brief: brief, + regionID: regionID, + regionEpoch: regionEpoch, + kind: kind, 
+ steps: steps, + stepsTime: make([]int64, len(steps)), + status: NewOpStatusTracker(), + level: level, + additionalInfos: opAdditionalInfo{ + value: make(map[string]string), + }, ApproximateSize: approximateSize, timeout: time.Duration(maxDuration) * time.Second, } @@ -118,8 +119,8 @@ func NewOperator(desc, brief string, regionID uint64, regionEpoch *metapb.Region // Sync some attribute with the given timeout. func (o *Operator) Sync(other *Operator) { o.timeout = other.timeout - o.AdditionalInfos[string(RelatedMergeRegion)] = strconv.FormatUint(other.RegionID(), 10) - other.AdditionalInfos[string(RelatedMergeRegion)] = strconv.FormatUint(o.RegionID(), 10) + o.SetAdditionalInfo(string(RelatedMergeRegion), strconv.FormatUint(other.RegionID(), 10)) + other.SetAdditionalInfo(string(RelatedMergeRegion), strconv.FormatUint(o.RegionID(), 10)) } func (o *Operator) String() string { @@ -297,8 +298,10 @@ func (o *Operator) CheckSuccess() bool { // Cancel marks the operator canceled. func (o *Operator) Cancel(reason ...CancelReasonType) bool { - if _, ok := o.AdditionalInfos[cancelReason]; !ok && len(reason) != 0 { - o.AdditionalInfos[cancelReason] = string(reason[0]) + o.additionalInfos.Lock() + defer o.additionalInfos.Unlock() + if _, ok := o.additionalInfos.value[cancelReason]; !ok && len(reason) != 0 { + o.additionalInfos.value[cancelReason] = string(reason[0]) } return o.status.To(CANCELED) } @@ -507,17 +510,6 @@ func (o *Operator) Record(finishTime time.Time) *OpRecord { return record } -// GetAdditionalInfo returns additional info with string -func (o *Operator) GetAdditionalInfo() string { - if len(o.AdditionalInfos) != 0 { - additionalInfo, err := json.Marshal(o.AdditionalInfos) - if err == nil { - return string(additionalInfo) - } - } - return "" -} - // IsLeaveJointStateOperator returns true if the desc is OpDescLeaveJointState. func (o *Operator) IsLeaveJointStateOperator() bool { return strings.EqualFold(o.desc, OpDescLeaveJointState) diff --git a/pkg/schedule/operator/operator_controller.go b/pkg/schedule/operator/operator_controller.go index 86e51fe70d6..d63e843f52a 100644 --- a/pkg/schedule/operator/operator_controller.go +++ b/pkg/schedule/operator/operator_controller.go @@ -510,7 +510,7 @@ func (oc *Controller) addOperatorInner(op *Operator) bool { log.Info("add operator", zap.Uint64("region-id", regionID), zap.Reflect("operator", op), - zap.String("additional-info", op.GetAdditionalInfo())) + zap.String("additional-info", op.LogAdditionalInfo())) // If there is an old operator, replace it. The priority should be checked // already. 
@@ -657,7 +657,7 @@ func (oc *Controller) removeOperatorInner(op *Operator) bool { } func (oc *Controller) removeRelatedMergeOperator(op *Operator) { - relatedID, _ := strconv.ParseUint(op.AdditionalInfos[string(RelatedMergeRegion)], 10, 64) + relatedID, _ := strconv.ParseUint(op.GetAdditionalInfo(string(RelatedMergeRegion)), 10, 64) relatedOpi, ok := oc.operators.Load(relatedID) if !ok { return @@ -666,7 +666,7 @@ func (oc *Controller) removeRelatedMergeOperator(op *Operator) { if relatedOp != nil && relatedOp.Status() != CANCELED { log.Info("operator canceled related merge region", zap.Uint64("region-id", relatedOp.RegionID()), - zap.String("additional-info", relatedOp.GetAdditionalInfo()), + zap.String("additional-info", relatedOp.LogAdditionalInfo()), zap.Duration("takes", relatedOp.RunningTime())) oc.removeOperatorInner(relatedOp) relatedOp.Cancel(RelatedMergeRegion) @@ -695,7 +695,7 @@ func (oc *Controller) buryOperator(op *Operator) { zap.Uint64("region-id", op.RegionID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op), - zap.String("additional-info", op.GetAdditionalInfo())) + zap.String("additional-info", op.LogAdditionalInfo())) operatorCounter.WithLabelValues(op.Desc(), "finish").Inc() operatorDuration.WithLabelValues(op.Desc()).Observe(op.RunningTime().Seconds()) for _, counter := range op.FinishedCounters { @@ -706,7 +706,7 @@ func (oc *Controller) buryOperator(op *Operator) { zap.Uint64("region-id", op.RegionID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op), - zap.String("additional-info", op.GetAdditionalInfo())) + zap.String("additional-info", op.LogAdditionalInfo())) operatorCounter.WithLabelValues(op.Desc(), "replace").Inc() case EXPIRED: log.Info("operator expired", @@ -719,14 +719,14 @@ func (oc *Controller) buryOperator(op *Operator) { zap.Uint64("region-id", op.RegionID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op), - zap.String("additional-info", op.GetAdditionalInfo())) + zap.String("additional-info", op.LogAdditionalInfo())) operatorCounter.WithLabelValues(op.Desc(), "timeout").Inc() case CANCELED: log.Info("operator canceled", zap.Uint64("region-id", op.RegionID()), zap.Duration("takes", op.RunningTime()), zap.Reflect("operator", op), - zap.String("additional-info", op.GetAdditionalInfo()), + zap.String("additional-info", op.LogAdditionalInfo()), ) operatorCounter.WithLabelValues(op.Desc(), "cancel").Inc() } diff --git a/pkg/schedule/operator/status_tracker.go b/pkg/schedule/operator/status_tracker.go index 0ba8135750c..a74d94b18a4 100644 --- a/pkg/schedule/operator/status_tracker.go +++ b/pkg/schedule/operator/status_tracker.go @@ -15,6 +15,7 @@ package operator import ( + "encoding/json" "time" "github.com/tikv/pd/pkg/utils/syncutil" @@ -135,3 +136,35 @@ func (trk *OpStatusTracker) String() string { defer trk.rw.RUnlock() return OpStatusToString(trk.current) } + +type opAdditionalInfo struct { + syncutil.RWMutex + value map[string]string +} + +// SetAdditionalInfo sets additional info with key and value. +func (o *Operator) SetAdditionalInfo(key string, value string) { + o.additionalInfos.Lock() + defer o.additionalInfos.Unlock() + o.additionalInfos.value[key] = value +} + +// GetAdditionalInfo returns additional info with key. 
+func (o *Operator) GetAdditionalInfo(key string) string { + o.additionalInfos.RLock() + defer o.additionalInfos.RUnlock() + return o.additionalInfos.value[key] +} + +// LogAdditionalInfo returns additional info with string +func (o *Operator) LogAdditionalInfo() string { + o.additionalInfos.RLock() + defer o.additionalInfos.RUnlock() + if len(o.additionalInfos.value) != 0 { + additionalInfo, err := json.Marshal(o.additionalInfos.value) + if err == nil { + return string(additionalInfo) + } + } + return "" +} diff --git a/pkg/schedule/operator/status_tracker_test.go b/pkg/schedule/operator/status_tracker_test.go index e53b017229a..8c897d1e545 100644 --- a/pkg/schedule/operator/status_tracker_test.go +++ b/pkg/schedule/operator/status_tracker_test.go @@ -15,6 +15,8 @@ package operator import ( + "fmt" + "sync" "testing" "time" @@ -178,3 +180,26 @@ func checkReachTime(re *require.Assertions, trk *OpStatusTracker, reached ...OpS re.True(trk.ReachTimeOf(st).IsZero()) } } + +func TestAdditionalInfoConcurrent(t *testing.T) { + op := NewOperator("test", "test", 0, nil, OpAdmin, 0) + + var wg sync.WaitGroup + for i := 0; i < 1000; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + key := fmt.Sprintf("key%d", i) + value := fmt.Sprintf("value%d", i) + op.SetAdditionalInfo(key, value) + if op.GetAdditionalInfo(key) != value { + t.Errorf("unexpected value for key %s", key) + } + }(i) + } + wg.Wait() + + if logInfo := op.LogAdditionalInfo(); logInfo == "" { + t.Error("LogAdditionalInfo returned an empty string") + } +} diff --git a/pkg/schedule/scatter/region_scatterer.go b/pkg/schedule/scatter/region_scatterer.go index 898c4d052a7..bdec5c98c9c 100644 --- a/pkg/schedule/scatter/region_scatterer.go +++ b/pkg/schedule/scatter/region_scatterer.go @@ -399,8 +399,8 @@ func (r *RegionScatterer) scatterRegion(region *core.RegionInfo, group string, s if op != nil { scatterSuccessCounter.Inc() r.Put(targetPeers, targetLeader, group) - op.AdditionalInfos["group"] = group - op.AdditionalInfos["leader-picked-count"] = strconv.FormatUint(leaderStorePickedCount, 10) + op.SetAdditionalInfo("group", group) + op.SetAdditionalInfo("leader-picked-count", strconv.FormatUint(leaderStorePickedCount, 10)) op.SetPriorityLevel(constant.High) } return op, nil diff --git a/pkg/schedule/scatter/region_scatterer_test.go b/pkg/schedule/scatter/region_scatterer_test.go index af41ed04b76..b0027e0e415 100644 --- a/pkg/schedule/scatter/region_scatterer_test.go +++ b/pkg/schedule/scatter/region_scatterer_test.go @@ -679,7 +679,7 @@ func TestSelectedStoresTooFewPeers(t *testing.T) { re.NoError(err) re.False(isPeerCountChanged(op)) if op != nil { - re.Equal(group, op.AdditionalInfos["group"]) + re.Equal(group, op.GetAdditionalInfo("group")) } } } diff --git a/pkg/schedule/schedulers/balance_leader.go b/pkg/schedule/schedulers/balance_leader.go index 6976bc4d8ea..6114d2a8f89 100644 --- a/pkg/schedule/schedulers/balance_leader.go +++ b/pkg/schedule/schedulers/balance_leader.go @@ -567,7 +567,7 @@ func (l *balanceLeaderScheduler) createOperator(solver *solver, collector *plan. 
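
One more note on the same patch: `Cancel` takes the write lock directly and performs the presence check and the write inside a single critical section instead of calling the new helpers, presumably because the check-then-set has to be atomic (only the first cancel reason should stick) and Go mutexes are not reentrant, so calling a locking helper while already holding the lock would deadlock. A small sketch of that check-and-set shape, with illustrative names rather than PD's:

package main

import (
	"fmt"
	"sync"
)

type infoMap struct {
	sync.RWMutex
	value map[string]string
}

// setIfAbsent records a value only when the key has not been set yet.
// The check and the write share one critical section; splitting them
// across two separately locked calls would let another goroutine win
// the race in between.
func (m *infoMap) setIfAbsent(key, value string) bool {
	m.Lock()
	defer m.Unlock()
	if _, ok := m.value[key]; ok {
		return false
	}
	m.value[key] = value
	return true
}

func main() {
	m := &infoMap{value: make(map[string]string)}
	fmt.Println(m.setIfAbsent("cancel-reason", "timeout"))       // true: first writer wins
	fmt.Println(m.setIfAbsent("cancel-reason", "related-merge")) // false: reason already recorded
}
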
op.FinishedCounters = append(op.FinishedCounters, balanceDirectionCounter.WithLabelValues(l.GetName(), solver.SourceMetricLabel(), solver.TargetMetricLabel()), ) - op.AdditionalInfos["sourceScore"] = strconv.FormatFloat(solver.sourceScore, 'f', 2, 64) - op.AdditionalInfos["targetScore"] = strconv.FormatFloat(solver.targetScore, 'f', 2, 64) + op.SetAdditionalInfo("sourceScore", strconv.FormatFloat(solver.sourceScore, 'f', 2, 64)) + op.SetAdditionalInfo("targetScore", strconv.FormatFloat(solver.targetScore, 'f', 2, 64)) return op } diff --git a/pkg/schedule/schedulers/balance_region.go b/pkg/schedule/schedulers/balance_region.go index e4202c133ad..608d008a99e 100644 --- a/pkg/schedule/schedulers/balance_region.go +++ b/pkg/schedule/schedulers/balance_region.go @@ -278,8 +278,8 @@ func (s *balanceRegionScheduler) transferPeer(solver *solver, collector *plan.Co op.FinishedCounters = append(op.FinishedCounters, balanceDirectionCounter.WithLabelValues(s.GetName(), sourceLabel, targetLabel), ) - op.AdditionalInfos["sourceScore"] = strconv.FormatFloat(solver.sourceScore, 'f', 2, 64) - op.AdditionalInfos["targetScore"] = strconv.FormatFloat(solver.targetScore, 'f', 2, 64) + op.SetAdditionalInfo("sourceScore", strconv.FormatFloat(solver.sourceScore, 'f', 2, 64)) + op.SetAdditionalInfo("targetScore", strconv.FormatFloat(solver.targetScore, 'f', 2, 64)) return op } diff --git a/pkg/schedule/schedulers/balance_witness.go b/pkg/schedule/schedulers/balance_witness.go index 3c4776c4666..aa97874409a 100644 --- a/pkg/schedule/schedulers/balance_witness.go +++ b/pkg/schedule/schedulers/balance_witness.go @@ -378,7 +378,7 @@ func (b *balanceWitnessScheduler) createOperator(solver *solver, collector *plan b.counter.WithLabelValues("move-witness", solver.SourceMetricLabel()+"-out"), b.counter.WithLabelValues("move-witness", solver.TargetMetricLabel()+"-in"), ) - op.AdditionalInfos["sourceScore"] = strconv.FormatFloat(solver.sourceScore, 'f', 2, 64) - op.AdditionalInfos["targetScore"] = strconv.FormatFloat(solver.targetScore, 'f', 2, 64) + op.SetAdditionalInfo("sourceScore", strconv.FormatFloat(solver.sourceScore, 'f', 2, 64)) + op.SetAdditionalInfo("targetScore", strconv.FormatFloat(solver.targetScore, 'f', 2, 64)) return op } diff --git a/pkg/schedule/schedulers/hot_region.go b/pkg/schedule/schedulers/hot_region.go index b4e904c1481..5e5e254596a 100644 --- a/pkg/schedule/schedulers/hot_region.go +++ b/pkg/schedule/schedulers/hot_region.go @@ -1653,8 +1653,8 @@ func (bs *balanceSolver) splitBucketsByLoad(region *core.RegionInfo, bucketStats } op := bs.splitBucketsOperator(region, [][]byte{splitKey}) if op != nil { - op.AdditionalInfos["accLoads"] = strconv.FormatUint(acc-stats[splitIdx-1].Loads[dim], 10) - op.AdditionalInfos["totalLoads"] = strconv.FormatUint(totalLoads, 10) + op.SetAdditionalInfo("accLoads", strconv.FormatUint(acc-stats[splitIdx-1].Loads[dim], 10)) + op.SetAdditionalInfo("totalLoads", strconv.FormatUint(totalLoads, 10)) } return op } diff --git a/pkg/schedule/schedulers/split_bucket.go b/pkg/schedule/schedulers/split_bucket.go index 609510446c7..32e57ec9b3d 100644 --- a/pkg/schedule/schedulers/split_bucket.go +++ b/pkg/schedule/schedulers/split_bucket.go @@ -297,7 +297,7 @@ func (s *splitBucketScheduler) splitBucket(plan *splitBucketPlan) []*operator.Op return nil } splitBucketNewOperatorCounter.Inc() - op.AdditionalInfos["hot-degree"] = strconv.FormatInt(int64(splitBucket.HotDegree), 10) + op.SetAdditionalInfo("hot-degree", strconv.FormatInt(int64(splitBucket.HotDegree), 10)) return 
[]*operator.Operator{op} } return nil From 2f85f1fd30061d81a704a830d15fa0defc9103b5 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 22 Apr 2024 14:31:39 +0800 Subject: [PATCH 14/17] *: make `TestTSOKeyspaceGroupSplitElection` stable (#8110) close tikv/pd#8099 Signed-off-by: Ryan Leung --- pkg/tso/keyspace_group_manager.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/tso/keyspace_group_manager.go b/pkg/tso/keyspace_group_manager.go index d1e94d445cc..2930357e2b4 100644 --- a/pkg/tso/keyspace_group_manager.go +++ b/pkg/tso/keyspace_group_manager.go @@ -1439,7 +1439,7 @@ func (kgm *KeyspaceGroupManager) groupSplitPatroller() { defer kgm.wg.Done() patrolInterval := groupPatrolInterval failpoint.Inject("fastGroupSplitPatroller", func() { - patrolInterval = 200 * time.Millisecond + patrolInterval = time.Second }) ticker := time.NewTicker(patrolInterval) defer ticker.Stop() From 5cdf2526e703f89c541f068813bee5467361c3e2 Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Mon, 22 Apr 2024 16:48:09 +0800 Subject: [PATCH 15/17] *: update dependencies (#8112) ref tikv/pd#4399 Signed-off-by: Ryan Leung --- client/go.mod | 10 +++++----- client/go.sum | 24 ++++++++++++------------ go.mod | 2 +- go.sum | 4 ++-- tests/integrations/go.mod | 2 +- tests/integrations/go.sum | 4 ++-- tools/go.mod | 2 +- tools/go.sum | 4 ++-- 8 files changed, 26 insertions(+), 26 deletions(-) diff --git a/client/go.mod b/client/go.mod index 9b2cb87f75e..89799796521 100644 --- a/client/go.mod +++ b/client/go.mod @@ -19,7 +19,7 @@ require ( go.uber.org/multierr v1.11.0 go.uber.org/zap v1.24.0 golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 - google.golang.org/grpc v1.59.0 + google.golang.org/grpc v1.62.1 google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9 ) @@ -34,11 +34,11 @@ require ( github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.46.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + golang.org/x/net v0.23.0 // indirect + golang.org/x/sys v0.18.0 // indirect golang.org/x/text v0.14.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 // indirect - google.golang.org/protobuf v1.32.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 // indirect + google.golang.org/protobuf v1.33.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/client/go.sum b/client/go.sum index a58d351ebcf..54942bb0bb8 100644 --- a/client/go.sum +++ b/client/go.sum @@ -22,8 +22,8 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= @@ -110,8 +110,8 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -122,8 +122,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= +golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -144,16 +144,16 @@ golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17 h1:Jyp0Hsi0bmHXG6k9eATXoYtjd6e2UzZ1SCn/wIupY14= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231106174013-bbf56f31fb17/go.mod h1:oQ5rr10WTTMvP4A36n8JpR1OrO1BEiV4f78CneXZxkA= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80 h1:AjyfHzEPEFp/NpvfN5g+KDla3EMojjhRVZc1i7cj+oM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240123012728-ef4313101c80/go.mod h1:PAREbraiVEVGVdTZsVWjSbbTtSyGbAgIIvni8a8CD5s= +google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk= +google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= google.golang.org/grpc/examples v0.0.0-20231221225426-4f03f3ff32c9 h1:ATnmU8nL2NfIyTSiBvJVDIDIr3qBmeW+c7z7XU21eWs= google.golang.org/grpc/examples 
v0.0.0-20231221225426-4f03f3ff32c9/go.mod h1:j5uROIAAgi3YmtiETMt1LW0d/lHqQ7wwrIY4uGRXLQ4= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= -google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= +google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/go.mod b/go.mod index c76242f3753..ff0cb20069a 100644 --- a/go.mod +++ b/go.mod @@ -179,7 +179,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/image v0.10.0 // indirect golang.org/x/mod v0.13.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect diff --git a/go.sum b/go.sum index d11fad07aa6..8c77a4b84da 100644 --- a/go.sum +++ b/go.sum @@ -605,8 +605,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= diff --git a/tests/integrations/go.mod b/tests/integrations/go.mod index cdd244cafc1..bb231f747b7 100644 --- a/tests/integrations/go.mod +++ b/tests/integrations/go.mod @@ -168,7 +168,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect diff --git a/tests/integrations/go.sum b/tests/integrations/go.sum index 79f7dddd130..eeb2d73ba7f 100644 --- a/tests/integrations/go.sum +++ b/tests/integrations/go.sum @@ -598,8 +598,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod 
h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= diff --git a/tools/go.mod b/tools/go.mod index 9d8728f7034..8d0f0d4ec35 100644 --- a/tools/go.mod +++ b/tools/go.mod @@ -167,7 +167,7 @@ require ( golang.org/x/crypto v0.21.0 // indirect golang.org/x/exp v0.0.0-20230711005742-c3f37128e5a4 // indirect golang.org/x/image v0.10.0 // indirect - golang.org/x/net v0.22.0 // indirect + golang.org/x/net v0.23.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/sync v0.6.0 // indirect golang.org/x/sys v0.18.0 // indirect diff --git a/tools/go.sum b/tools/go.sum index d7c7a4801b1..a3c41c16420 100644 --- a/tools/go.sum +++ b/tools/go.sum @@ -599,8 +599,8 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211123203042-d83791d6bcd9/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.22.0 h1:9sGLhx7iRIHEiX0oAJ3MRZMUCElJgy7Br1nO+AMN3Tc= -golang.org/x/net v0.22.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= +golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= From 1d386f6f189feb4a9b83799efa593474b62e5aa3 Mon Sep 17 00:00:00 2001 From: lhy1024 Date: Mon, 22 Apr 2024 21:25:39 +0800 Subject: [PATCH 16/17] tests: avoid panic in cluster test (#8114) close tikv/pd#8113 Signed-off-by: lhy1024 Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/cluster/cluster_test.go | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/server/cluster/cluster_test.go b/server/cluster/cluster_test.go index ecd579d8881..945e354bb6c 100644 --- a/server/cluster/cluster_test.go +++ b/server/cluster/cluster_test.go @@ -3655,7 +3655,8 @@ func TestInterval(t *testing.T) { func waitAddLearner(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse testutil.Eventually(re, func() bool { - if res = stream.Recv().(*pdpb.RegionHeartbeatResponse); res != nil { + if r := stream.Recv(); r != nil { + res = r.(*pdpb.RegionHeartbeatResponse) return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_AddLearnerNode && res.GetChangePeer().GetPeer().GetStoreId() == storeID @@ -3671,7 +3672,8 @@ func waitAddLearner(re *require.Assertions, stream 
mockhbstream.HeartbeatStream, func waitPromoteLearner(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse testutil.Eventually(re, func() bool { - if res = stream.Recv().(*pdpb.RegionHeartbeatResponse); res != nil { + if r := stream.Recv(); r != nil { + res = r.(*pdpb.RegionHeartbeatResponse) return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_AddNode && res.GetChangePeer().GetPeer().GetStoreId() == storeID @@ -3688,7 +3690,8 @@ func waitPromoteLearner(re *require.Assertions, stream mockhbstream.HeartbeatStr func waitRemovePeer(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse testutil.Eventually(re, func() bool { - if res = stream.Recv().(*pdpb.RegionHeartbeatResponse); res != nil { + if r := stream.Recv(); r != nil { + res = r.(*pdpb.RegionHeartbeatResponse) return res.GetRegionId() == region.GetID() && res.GetChangePeer().GetChangeType() == eraftpb.ConfChangeType_RemoveNode && res.GetChangePeer().GetPeer().GetStoreId() == storeID @@ -3704,7 +3707,8 @@ func waitRemovePeer(re *require.Assertions, stream mockhbstream.HeartbeatStream, func waitTransferLeader(re *require.Assertions, stream mockhbstream.HeartbeatStream, region *core.RegionInfo, storeID uint64) *core.RegionInfo { var res *pdpb.RegionHeartbeatResponse testutil.Eventually(re, func() bool { - if res = stream.Recv().(*pdpb.RegionHeartbeatResponse); res != nil { + if r := stream.Recv(); r != nil { + res = r.(*pdpb.RegionHeartbeatResponse) if res.GetRegionId() == region.GetID() { for _, peer := range append(res.GetTransferLeader().GetPeers(), res.GetTransferLeader().GetPeer()) { if peer.GetStoreId() == storeID { From 1e65f9dda7d9b93e68178139ec3b4c31539a223d Mon Sep 17 00:00:00 2001 From: Ryan Leung Date: Wed, 24 Apr 2024 14:18:10 +0800 Subject: [PATCH 17/17] *: fix follower cannot handle member request (#8122) ref tikv/pd#7519 Signed-off-by: Ryan Leung Co-authored-by: ti-chi-bot[bot] <108142056+ti-chi-bot[bot]@users.noreply.github.com> --- server/apiv2/handlers/micro_service.go | 1 - 1 file changed, 1 deletion(-) diff --git a/server/apiv2/handlers/micro_service.go b/server/apiv2/handlers/micro_service.go index 209a4c95445..fd44665530f 100644 --- a/server/apiv2/handlers/micro_service.go +++ b/server/apiv2/handlers/micro_service.go @@ -26,7 +26,6 @@ import ( // RegisterMicroService registers microservice handler to the router. func RegisterMicroService(r *gin.RouterGroup) { router := r.Group("ms") - router.Use(middlewares.BootstrapChecker()) router.GET("members/:service", GetMembers) router.GET("primary/:service", GetPrimary) }
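
Back to the cluster-test fix in PATCH 16: the change hinges on a property of Go type assertions, namely that the single-result form `x.(*T)` panics when the interface value is nil or holds a different concrete type, which is presumably what happened when the mock heartbeat stream had nothing to deliver yet. The fixed code checks the received value for nil before asserting. Below is a minimal stand-alone sketch of the failing and fixed shapes; the message and heartbeat types are stand-ins, not PD's actual types.

package main

import "fmt"

// message and heartbeat are illustrative stand-ins for the heartbeat
// stream's response type.
type message interface{ regionID() uint64 }

type heartbeat struct{ region uint64 }

func (h *heartbeat) regionID() uint64 { return h.region }

// recv mimics the mock stream: it returns nil when nothing has been
// sent yet, and a concrete response otherwise.
func recv(empty bool) message {
	if empty {
		return nil
	}
	return &heartbeat{region: 1}
}

func main() {
	// Old shape: a single-result type assertion on a nil interface
	// panics with "interface conversion". Uncommenting the next line
	// would crash the program.
	// _ = recv(true).(*heartbeat)

	// New shape: check the received interface for nil first, then assert.
	if r := recv(true); r != nil {
		res := r.(*heartbeat)
		fmt.Println("got region", res.regionID())
	} else {
		fmt.Println("nothing received yet; the caller just retries")
	}

	// The comma-ok form is another way to stay panic-free.
	if res, ok := recv(false).(*heartbeat); ok {
		fmt.Println("got region", res.regionID())
	}
}
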