From 4b68f66df4a76a539743b7e4b64f98f48584d6ea Mon Sep 17 00:00:00 2001
From: fanfanyangyang
Date: Fri, 24 May 2024 15:28:55 +0800
Subject: [PATCH 01/29] feat: add a feature to reduce the load of partition
 cron jobs #4545
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../go-pubpkg/errno/50000_dbpartition_code.go |   3 +-
 .../go-pubpkg/errno/50000_dbpriv_code.go      |   2 +-
 .../assests/migrations/000006_init.down.sql   |   1 +
 .../assests/migrations/000006_init.up.sql     |   5 +
 .../db-partition/cron/cron_basic_func.go      | 286 --------
 .../mysql/db-partition/cron/cron_object.go    |  17 -
 dbm-services/mysql/db-partition/go.mod        |   1 +
 dbm-services/mysql/db-partition/go.sum        |   2 +
 .../mysql/db-partition/handler/handler.go     |  66 +-
 dbm-services/mysql/db-partition/main.go       |   5 +-
 .../mysql/db-partition/model/init_bkrepo.go   |  26 +
 .../mysql/db-partition/model/init_env.go      |   9 +
 .../mysql/db-partition/monitor/monitor.go     |  27 +-
 .../db-partition/service/bk_repo_service.go   |  76 +++
 .../db-partition/service/check_partition.go   | 148 ++--
 .../service/check_partition_base_func.go      | 225 ++-----
 .../service/check_partition_object.go         |  38 +-
 .../db-partition/{cron => service}/cron.go    |  41 +-
 .../db-partition/service/cron_basic_func.go   | 632 ++++++++++++++++++
 .../mysql/db-partition/service/cron_object.go |  59 ++
 .../db-partition/service/db_meta_service.go   |  29 +-
 .../service/execute_partition_object.go       |  33 +-
 .../db-partition/service/manage_config.go     |  10 +-
 dbm-services/mysql/db-priv/assests/migrate.go |   2 +-
 .../mysql/db-priv/handler/admin_password.go   |   6 +-
 .../mysql/db-priv/service/admin_password.go   |   2 +-
 dbm-services/mysql/db-simulation/go.mod       |   2 +
 dbm-services/mysql/db-simulation/go.sum       |   4 +
 dbm-services/mysql/db-tools/dbactuator/go.mod |   4 +-
 .../db_meta/api/priv_manager/biz_clusters.py  |   1 +
 .../backend/db_services/partition/handlers.py |  23 +-
 .../db_services/partition/serializers.py      |   2 +-
 dbm-ui/backend/db_services/partition/views.py |   8 +-
 dbm-ui/backend/flow/consts.py                 |   1 +
 .../bamboo/scene/common/download_file.py      |  59 ++
 .../bamboo/scene/mysql/mysql_partition.py     |  63 +-
 .../scene/mysql/mysql_partition_cron.py       |  78 +++
 .../bamboo/scene/spider/spider_partition.py   | 105 ++-
 .../scene/spider/spider_partition_cron.py     |  88 +++
 .../backend/flow/engine/controller/mysql.py   |  16 +
 .../backend/flow/engine/controller/spider.py  |  10 +-
 .../mysql/mysql_partition_report.py           | 128 ++++
 .../spider/spider_partition_callback.py       |  79 +++
 dbm-ui/backend/flow/urls.py                   |   6 +
 .../flow/utils/base/payload_handler.py        |   1 +
 .../flow/utils/mysql/mysql_act_dataclass.py   |   9 +
 .../flow/utils/mysql/mysql_act_playload.py    |  34 +-
 .../utils/mysql/mysql_context_dataclass.py    |   2 +-
 dbm-ui/backend/flow/views/download_file.py    |  31 +
 .../flow/views/mysql_partition_cron.py        |  23 +
 .../flow/views/spider_partition_cron.py       |  23 +
 .../builders/mysql/mysql_partition_cron.py    |  45 ++
 .../tendbcluster/tendb_partition_cron.py      |  46 ++
 dbm-ui/backend/ticket/constants.py            |   3 +
 dbm-ui/config/prod.py                         |   2 +-
 dbm-ui/config/stag.py                         |   2 +-
 dbm-ui/poetry.lock                            |   2 +-
 helm-charts/bk-dbm/Chart.lock                 |   6 +-
 helm-charts/bk-dbm/Chart.yaml                 |   2 +-
 .../bk-dbm/charts/dbpartition/Chart.yaml      |   4 +-
 .../configmaps/dbpartition-configmap.yaml     |   8 +
 helm-charts/bk-dbm/values.yaml                |   2 +
 62 files changed, 1931 insertions(+), 742 deletions(-)
 create mode 100644
dbm-services/mysql/db-partition/assests/migrations/000006_init.down.sql create mode 100644 dbm-services/mysql/db-partition/assests/migrations/000006_init.up.sql delete mode 100644 dbm-services/mysql/db-partition/cron/cron_basic_func.go delete mode 100644 dbm-services/mysql/db-partition/cron/cron_object.go create mode 100644 dbm-services/mysql/db-partition/model/init_bkrepo.go create mode 100644 dbm-services/mysql/db-partition/service/bk_repo_service.go rename dbm-services/mysql/db-partition/{cron => service}/cron.go (57%) create mode 100644 dbm-services/mysql/db-partition/service/cron_basic_func.go create mode 100644 dbm-services/mysql/db-partition/service/cron_object.go create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/common/download_file.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition_cron.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition_cron.py create mode 100644 dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_partition_report.py create mode 100644 dbm-ui/backend/flow/plugins/components/collections/spider/spider_partition_callback.py create mode 100644 dbm-ui/backend/flow/views/download_file.py create mode 100644 dbm-ui/backend/flow/views/mysql_partition_cron.py create mode 100644 dbm-ui/backend/flow/views/spider_partition_cron.py create mode 100644 dbm-ui/backend/ticket/builders/mysql/mysql_partition_cron.py create mode 100644 dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition_cron.py diff --git a/dbm-services/common/go-pubpkg/errno/50000_dbpartition_code.go b/dbm-services/common/go-pubpkg/errno/50000_dbpartition_code.go index 27eecb97cb..df877ad3c5 100644 --- a/dbm-services/common/go-pubpkg/errno/50000_dbpartition_code.go +++ b/dbm-services/common/go-pubpkg/errno/50000_dbpartition_code.go @@ -29,5 +29,6 @@ var ( DomainNotExists = Errno{Code: 52030, Message: "domain not exists", CNMessage: "域名不存在"} NotSupportedPartitionType = Errno{Code: 52031, Message: "not supported partition type", CNMessage: "不支持的分区类型"} WrongPartitionNameFormat = Errno{Code: 52032, Message: "wrong partition name format ", CNMessage: "分区名格式错误"} - DownloadDbactorFail = Errno{Code: 52030, Message: "download dbactor fail", CNMessage: "下载dbactor失败"} + DownloadDbactorFail = Errno{Code: 52033, Message: "download dbactor fail", CNMessage: "下载dbactor失败"} + DownloadFileFail = Errno{Code: 52034, Message: "download file fail", CNMessage: "下载文件失败"} ) diff --git a/dbm-services/common/go-pubpkg/errno/50000_dbpriv_code.go b/dbm-services/common/go-pubpkg/errno/50000_dbpriv_code.go index 0375c71035..ade35a7191 100644 --- a/dbm-services/common/go-pubpkg/errno/50000_dbpriv_code.go +++ b/dbm-services/common/go-pubpkg/errno/50000_dbpriv_code.go @@ -69,7 +69,7 @@ var ( CNMessage: "要求包含的字符类型大于字符串长度"} TryTooManyTimes = Errno{Code: 51024, Message: "try too many times", CNMessage: "尝试太多次"} RuleIdNull = Errno{Code: 51025, Message: "Rule ID should not be empty", - CNMessage: "安全规则的id不能为空"} + CNMessage: "规则的id不能为空"} RuleNameNull = Errno{Code: 51026, Message: "Rule name should not be empty", CNMessage: "安全规则的名称不能为空"} RuleExisted = Errno{Code: 51027, Message: "Rule already existed ", CNMessage: "规则已存在"} diff --git a/dbm-services/mysql/db-partition/assests/migrations/000006_init.down.sql b/dbm-services/mysql/db-partition/assests/migrations/000006_init.down.sql new file mode 100644 index 0000000000..a4f40086f9 --- /dev/null +++ b/dbm-services/mysql/db-partition/assests/migrations/000006_init.down.sql @@ -0,0 +1 @@ +SET NAMES utf8; diff --git 
a/dbm-services/mysql/db-partition/assests/migrations/000006_init.up.sql b/dbm-services/mysql/db-partition/assests/migrations/000006_init.up.sql new file mode 100644 index 0000000000..47135828da --- /dev/null +++ b/dbm-services/mysql/db-partition/assests/migrations/000006_init.up.sql @@ -0,0 +1,5 @@ +SET NAMES utf8; +alter table mysql_partition_cron_log drop column bk_biz_id,drop column cluster_id,drop column ticket_id, + drop column immute_domain,drop column bk_cloud_id,drop column time_zone; +alter table spider_partition_cron_log drop column bk_biz_id,drop column cluster_id,drop column ticket_id, + drop column immute_domain,drop column bk_cloud_id,drop column time_zone; diff --git a/dbm-services/mysql/db-partition/cron/cron_basic_func.go b/dbm-services/mysql/db-partition/cron/cron_basic_func.go deleted file mode 100644 index 51bbf5933e..0000000000 --- a/dbm-services/mysql/db-partition/cron/cron_basic_func.go +++ /dev/null @@ -1,286 +0,0 @@ -/* - * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. - * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. - * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. - * You may obtain a copy of the License at https://opensource.org/licenses/MIT - * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on - * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the - * specific language governing permissions and limitations under the License. - */ - -package cron - -import ( - "dbm-services/mysql/db-partition/util" - "fmt" - "log/slog" - "sync" - "time" - - "dbm-services/common/go-pubpkg/errno" - "dbm-services/mysql/db-partition/model" - "dbm-services/mysql/db-partition/monitor" - "dbm-services/mysql/db-partition/service" -) - -// Scheduler TODO -var Scheduler string - -// Run TODO -func (m PartitionJob) Run() { - var err error - var key string - Scheduler = "127.0.0.1" - offetSeconds := m.ZoneOffset * 60 * 60 - zone := time.FixedZone(m.ZoneName, offetSeconds) - m.CronDate = time.Now().In(zone).Format("20060102") - key = fmt.Sprintf("%s_%s_%d_%s", m.CronType, m.Hour, m.ZoneOffset, m.CronDate) - flag, err := model.Lock(key) - if err != nil { - dimension := monitor.NewDeveloperEventDimension(Scheduler) - content := fmt.Sprintf("partition error. set redis mutual exclusion error: %s", err.Error()) - monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler) - slog.Error("msg", "model.Lock err", err) - } else if flag { - m.ExecutePartition(service.Tendbha) - m.ExecutePartition(service.Tendbcluster) - } else { - slog.Warn("set redis mutual exclusion fail, do nothing", "key", key) - } -} - -// ExecutePartition 执行业务的分区 -func (m PartitionJob) ExecutePartition(clusterType string) { - zone := fmt.Sprintf("%+03d:00", m.ZoneOffset) - needMysql, errOuter := service.NeedPartition(m.CronType, clusterType, m.ZoneOffset, m.CronDate) - if errOuter != nil { - dimension := monitor.NewDeveloperEventDimension(Scheduler) - content := fmt.Sprintf("partition error. 
get need partition list fail: %s", errOuter.Error()) - monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler) - slog.Error("msg", "get need partition list fail", errOuter) - return - } - var UniqMap = make(map[int]struct{}) - var UniqMachine = make(map[string]struct{}) - var cloudMachineList = make(map[int64][]string) - if clusterType != service.Tendbha && service.Tendbcluster != clusterType { - slog.Error(fmt.Sprintf("cluster type %s not support", clusterType)) - return - } - - for _, need := range needMysql { - if _, isExists := UniqMap[need.BkBizId]; isExists == false { - UniqMap[need.BkBizId] = struct{}{} - clusters, err := service.GetAllClustersInfo(service.BkBizId{BkBizId: int64(need.BkBizId)}) - if err != nil { - dimension := monitor.NewDeveloperEventDimension(Scheduler) - content := fmt.Sprintf("partition error. "+ - "get cluster from dbmeta/priv_manager/biz_clusters error: %s", err.Error()) - monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler) - slog.Error("msg", "partition error. "+ - "get cluster from dbmeta/priv_manager/biz_clusters error", err) - continue - } - for _, cluster := range clusters { - if clusterType == service.Tendbha { - for _, storage := range cluster.Storages { - if storage.InstanceRole == service.Orphan || storage.InstanceRole == service.BackendMaster { - if _, existFlag := UniqMachine[fmt.Sprintf("%s|%d", - storage.IP, cluster.BkCloudId)]; existFlag == false { - cloudMachineList[cluster.BkCloudId] = append( - cloudMachineList[cluster.BkCloudId], storage.IP) - } - } - } - } else { - for _, storage := range cluster.Storages { - if storage.InstanceRole == service.RemoteMaster { - if _, existFlag := UniqMachine[fmt.Sprintf("%s|%d", - storage.IP, cluster.BkCloudId)]; existFlag == false { - cloudMachineList[cluster.BkCloudId] = append( - cloudMachineList[cluster.BkCloudId], storage.IP) - } - } - } - } - } - } - } - var wgDownload sync.WaitGroup - tokenBucketDownload := make(chan int, 5) - for cloud, machines := range cloudMachineList { - tmp := util.SplitArray(machines, 20) - for _, ips := range tmp { - wgDownload.Add(1) - tokenBucketDownload <- 0 - go func(cloud int64, ips []string) { - defer func() { - <-tokenBucketDownload - wgDownload.Done() - }() - // 按照机器提前下载好dbactor,减少重复下次 - err := service.DownloadDbactor(cloud, ips) - if err != nil { - dimension := monitor.NewDeveloperEventDimension(Scheduler) - content := fmt.Sprintf("%v download dbactor fail: %s", ips, err.Error()) - monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler) - slog.Error("msg", "download dbactor fail. 
"+ - "dbmeta/apis/v1/flow/scene/download_dbactor/ error", err) - return - } - // 下发dbactor时间,避免造成瓶颈 - time.Sleep(2 * time.Minute) - }(cloud, ips) - } - } - wgDownload.Wait() - close(tokenBucketDownload) - - var wg sync.WaitGroup - tokenBucket := make(chan int, 10) - for _, item := range needMysql { - wg.Add(1) - tokenBucket <- 0 - go func(item *service.Checker) { - defer func() { - <-tokenBucket - wg.Done() - }() - item.FromCron = true - objects, err := (*item).DryRun() - if err != nil { - code, _ := errno.DecodeErr(err) - if code == errno.NothingToDo.Code { - // 当天首次执行发现没有需要执行的sql,记录日志。重试没有执行的sql,不需要记录日志。 - if m.CronType == "daily" { - _ = service.AddLog(item.ConfigId, item.BkBizId, item.ClusterId, *item.BkCloudId, 0, - item.ImmuteDomain, zone, m.CronDate, Scheduler, errno.NothingToDo.Message, service.CheckSucceeded, - item.ClusterType) - } - } else { - dimension := monitor.NewPartitionEventDimension(item.BkBizId, item.DbAppAbbr, item.BkBizName, - *item.BkCloudId, item.ImmuteDomain) - content := fmt.Sprintf("partition error. get partition sql fail: %s", err.Error()) - monitor.SendEvent(monitor.PartitionEvent, dimension, content, "127.0.0.1") - _ = service.AddLog(item.ConfigId, item.BkBizId, item.ClusterId, *item.BkCloudId, 0, - item.ImmuteDomain, zone, m.CronDate, Scheduler, content, service.CheckFailed, item.ClusterType) - slog.Error(fmt.Sprintf("%v", *item), "get partition sql fail", err) - } - return - } - slog.Info("do create partition ticket") - service.CreatePartitionTicket(*item, objects, m.ZoneOffset, m.CronDate, Scheduler) - time.Sleep(30 * time.Second) - }(item) - } - wg.Wait() - close(tokenBucket) -} - -// ExecutePartitionOneTime 一次性调度 -func (m PartitionJob) ExecutePartitionOneTime(clusterType string) { - needMysql, errOuter := service.NeedPartition(m.CronType, clusterType, m.ZoneOffset, m.CronDate) - if errOuter != nil { - slog.Error("testtest", "get need partition list fail", errOuter) - return - } - var UniqMap = make(map[int]struct{}) - var UniqMachine = make(map[string]struct{}) - var cloudMachineList = make(map[int64][]string) - if clusterType != service.Tendbha && service.Tendbcluster != clusterType { - slog.Error(fmt.Sprintf("cluster type %s not support", clusterType)) - return - } - - for _, need := range needMysql { - if _, isExists := UniqMap[need.BkBizId]; isExists == false { - UniqMap[need.BkBizId] = struct{}{} - clusters, err := service.GetAllClustersInfo(service.BkBizId{BkBizId: int64(need.BkBizId)}) - if err != nil { - dimension := monitor.NewDeveloperEventDimension(Scheduler) - content := fmt.Sprintf("partition error. "+ - "get cluster from dbmeta/priv_manager/biz_clusters error: %s", err.Error()) - monitor.SendEvent(monitor.PartitionDeveloperEvent, dimension, content, Scheduler) - slog.Error("msg", "partition error. 
"+ - "get cluster from dbmeta/priv_manager/biz_clusters error", err) - continue - } - for _, cluster := range clusters { - if clusterType == service.Tendbha { - for _, storage := range cluster.Storages { - if storage.InstanceRole == service.Orphan || storage.InstanceRole == service.BackendMaster { - if _, existFlag := UniqMachine[fmt.Sprintf("%s|%d", - storage.IP, cluster.BkCloudId)]; existFlag == false { - cloudMachineList[cluster.BkCloudId] = append( - cloudMachineList[cluster.BkCloudId], storage.IP) - } - } - } - } else { - for _, storage := range cluster.Storages { - if storage.InstanceRole == service.RemoteMaster { - if _, existFlag := UniqMachine[fmt.Sprintf("%s|%d", - storage.IP, cluster.BkCloudId)]; existFlag == false { - cloudMachineList[cluster.BkCloudId] = append( - cloudMachineList[cluster.BkCloudId], storage.IP) - } - } - } - } - } - } - } - - var wgDownload sync.WaitGroup - tokenBucketDownload := make(chan int, 5) - for cloud, machines := range cloudMachineList { - tmp := util.SplitArray(machines, 20) - for _, ips := range tmp { - wgDownload.Add(1) - tokenBucketDownload <- 0 - go func(cloud int64, ips []string) { - defer func() { - <-tokenBucketDownload - wgDownload.Done() - }() - // 按照机器提前下载好dbactor,减少重复下次 - err := service.DownloadDbactor(cloud, ips) - if err != nil { - slog.Error("msg", "download dbactor fail. "+ - "dbmeta/apis/v1/flow/scene/download_dbactor error", err) - return - } - // 下发dbactor时间,避免造成瓶颈 - time.Sleep(2 * time.Minute) - }(cloud, ips) - } - } - wgDownload.Wait() - close(tokenBucketDownload) - - var wg sync.WaitGroup - tokenBucket := make(chan int, 10) - for _, item := range needMysql { - wg.Add(1) - tokenBucket <- 0 - go func(item *service.Checker) { - defer func() { - <-tokenBucket - wg.Done() - }() - objects, err := (*item).DryRun() - if err != nil { - code, _ := errno.DecodeErr(err) - if code != errno.NothingToDo.Code { - slog.Error(fmt.Sprintf("%v", *item), "get partition sql fail", err) - } - return - } - slog.Info("do create partition ticket") - service.CreatePartitionTicket(*item, objects, m.ZoneOffset, m.CronDate, Scheduler) - time.Sleep(30 * time.Second) - }(item) - } - wg.Wait() - close(tokenBucket) -} diff --git a/dbm-services/mysql/db-partition/cron/cron_object.go b/dbm-services/mysql/db-partition/cron/cron_object.go deleted file mode 100644 index 46c0ab7f82..0000000000 --- a/dbm-services/mysql/db-partition/cron/cron_object.go +++ /dev/null @@ -1,17 +0,0 @@ -package cron - -// Daily TODO -const Daily = "daily" - -// Retry TODO -const Retry = "retry" - -// PartitionJob TODO -type PartitionJob struct { - CronType string `json:"cron_type"` - ZoneOffset int `json:"zone_offset"` - ZoneName string `json:"zone_name"` - CronDate string `json:"cron_date"` - Hour string `json:"hour"` - ClusterType string `json:"cluster_type"` -} diff --git a/dbm-services/mysql/db-partition/go.mod b/dbm-services/mysql/db-partition/go.mod index 354f3c7645..3cc6f0d4c5 100644 --- a/dbm-services/mysql/db-partition/go.mod +++ b/dbm-services/mysql/db-partition/go.mod @@ -13,6 +13,7 @@ require ( github.com/spf13/viper v1.16.0 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 golang.org/x/exp v0.0.0-20230315142452-642cacee5cc0 + golang.org/x/time v0.1.0 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gorm.io/driver/mysql v1.5.1 gorm.io/gorm v1.25.4 diff --git a/dbm-services/mysql/db-partition/go.sum b/dbm-services/mysql/db-partition/go.sum index 5f9daa13da..b4bf1df40c 100644 --- a/dbm-services/mysql/db-partition/go.sum +++ 
b/dbm-services/mysql/db-partition/go.sum
@@ -478,6 +478,8 @@ golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/time v0.1.0 h1:xYY+Bajn2a7VBmTM5GikTmnK8ZuX8YgnQCqZpbBNtmA=
+golang.org/x/time v0.1.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
diff --git a/dbm-services/mysql/db-partition/handler/handler.go b/dbm-services/mysql/db-partition/handler/handler.go
index 8189a0786e..62cbd592d8 100644
--- a/dbm-services/mysql/db-partition/handler/handler.go
+++ b/dbm-services/mysql/db-partition/handler/handler.go
@@ -2,19 +2,15 @@ package handler
 
 import (
+	"dbm-services/mysql/db-partition/model"
+	"dbm-services/mysql/db-partition/monitor"
 	"errors"
 	"fmt"
 	"log/slog"
 	"net/http"
 	_ "runtime/debug" // debug TODO
-	"strconv"
-	"strings"
 	"time"
 
-	"dbm-services/mysql/db-partition/monitor"
-
-	"dbm-services/mysql/db-partition/cron"
-
 	cron_pkg "github.com/robfig/cron/v3"
 
 	"dbm-services/common/go-pubpkg/errno"
@@ -261,7 +257,7 @@ func UpdatePartitionsConfig(r *gin.Context) {
 	return
 }
 
-// CreatePartitionLog 用于创建分区后马上执行分区规则,将执行单据的信息记录到日志表中
+// CreatePartitionLog ticket callback: records the ticket execution info into the cron log table
 func CreatePartitionLog(r *gin.Context) {
 	var input service.CreatePartitionCronLog
 	err := r.ShouldBind(&input)
@@ -271,22 +267,33 @@ func CreatePartitionLog(r *gin.Context) {
 		SendResponse(r, err, nil)
 		return
 	}
-	// 计算单据处于集群时区的日期
-	offsetStr := strings.Split(input.TimeZone, ":")[0]
-	offset, _ := strconv.Atoi(offsetStr)
-	offetSeconds := offset * 60 * 60
-	name := fmt.Sprintf("UTC%s", offsetStr)
-	zone := time.FixedZone(name, offetSeconds)
-	date := time.Now().In(zone).Format("20060102")
-	err = service.AddLog(input.ConfigId, input.BkBizId, input.ClusterId, input.BkCloudId, input.TicketId,
-		input.ImmuteDomain, input.TimeZone, date, "from_ticket", "",
-		service.ExecuteAsynchronous, input.ClusterType)
-	if err != nil {
-		slog.Error(err.Error())
-		SendResponse(r, err, nil)
-		return
-	}
-	SendResponse(r, nil, "插入分区日志成功")
+	tx := model.DB.Self.Begin()
+	tb := service.MysqlPartitionCronLogTable
+	if input.ClusterType == service.Tendbcluster {
+		tb = service.SpiderPartitionCronLogTable
+	}
+	today := time.Now().Format("20060102")
+	for _, l := range input.Logs {
+		var vdate string
+		if l.CronDate == "" {
+			vdate = today
+		} else {
+			vdate = l.CronDate
+		}
+		log := &service.PartitionCronLog{ConfigId: l.ConfigId, CronDate: vdate,
+			Scheduler: l.Scheduler, CheckInfo: l.CheckInfo, Status: l.Status}
+		err = tx.Debug().Table(tb).Create(log).Error
+		if err != nil {
+			tx.Rollback()
+			slog.Error("msg", "add cron log failed", err)
+			break
+		}
+	}
+	// commit only when every insert succeeded; the transaction was already rolled back on error
+	if err == nil {
+		tx.Commit()
+	}
+	SendResponse(r, err, nil)
 	return
 }
 
@@ -321,7 +325,7 @@ func MigrateConfig(r *gin.Context) {
 // CronEntries 查询定时任务
 func CronEntries(r *gin.Context) {
 	var entries []cron_pkg.Entry
-	for _, v := range cron.CronList {
+	for _, v := range service.CronList {
 		entries = append(entries, v.Entries()...)
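		// each robfig/cron Entry exposes the parsed Schedule plus its Next and
		// Prev run times, so this endpoint effectively dumps the per-timezone
		// schedules registered by service.RegisterCron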
} slog.Info("msg", "entries", entries) @@ -331,7 +335,7 @@ func CronEntries(r *gin.Context) { // CronStop 关闭分区定时任务 func CronStop(r *gin.Context) { - for _, v := range cron.CronList { + for _, v := range service.CronList { v.Stop() } SendResponse(r, nil, "关闭分区定时任务成功") @@ -340,7 +344,7 @@ func CronStop(r *gin.Context) { // CronStart 开启分区定时任务 func CronStart(r *gin.Context) { - for _, v := range cron.CronList { + for _, v := range service.CronList { v.Start() } SendResponse(r, nil, "开启分区定时任务成功") @@ -349,7 +353,7 @@ func CronStart(r *gin.Context) { // RunOnce 调度执行一次 func RunOnce(r *gin.Context) { - var input cron.PartitionJob + var input service.PartitionJob err := r.ShouldBind(&input) if err != nil { err = errno.ErrReadEntity.Add(err.Error()) @@ -357,8 +361,12 @@ func RunOnce(r *gin.Context) { SendResponse(r, err, nil) return } - input.ExecutePartitionOneTime(input.ClusterType) - SendResponse(r, nil, "调度一次成功") + if input.ClusterType == service.Tendbha { + input.ExecuteTendbhaPartition() + } else if input.ClusterType == service.Tendbcluster { + input.ExecuteTendbclusterPartition() + } + SendResponse(r, nil, "异步执行,执行结果请看日志") return } diff --git a/dbm-services/mysql/db-partition/main.go b/dbm-services/mysql/db-partition/main.go index 595d0e7ad8..4858fc4545 100644 --- a/dbm-services/mysql/db-partition/main.go +++ b/dbm-services/mysql/db-partition/main.go @@ -1,6 +1,7 @@ package main import ( + "dbm-services/mysql/db-partition/service" "net/http" "os" @@ -14,7 +15,6 @@ import ( "dbm-services/common/go-pubpkg/apm/metric" "dbm-services/common/go-pubpkg/apm/trace" "dbm-services/mysql/db-partition/assests" - "dbm-services/mysql/db-partition/cron" "dbm-services/mysql/db-partition/model" "dbm-services/mysql/db-partition/monitor" "dbm-services/mysql/db-partition/router" @@ -37,7 +37,7 @@ func main() { monitor.InitMonitor() // 注册定时任务 - cronList, err := cron.RegisterCron() + cronList, err := service.RegisterCron() if err != nil { os.Exit(0) } @@ -76,4 +76,5 @@ func init() { model.InitLog() model.DB.Init() model.InitClient() + model.InitBkRepo() } diff --git a/dbm-services/mysql/db-partition/model/init_bkrepo.go b/dbm-services/mysql/db-partition/model/init_bkrepo.go new file mode 100644 index 0000000000..fa96bbb0c5 --- /dev/null +++ b/dbm-services/mysql/db-partition/model/init_bkrepo.go @@ -0,0 +1,26 @@ +package model + +import "github.com/spf13/viper" + +// BkRepo 蓝鲸介质中心信息 +var BkRepo BkRepoConfig + +// InitBkRepo 初始化介质中心信息 +func InitBkRepo() { + BkRepo = BkRepoConfig{ + PublicBucket: viper.GetString("bkrepo.public_bucket"), + Project: viper.GetString("bkrepo.project"), + User: viper.GetString("bkrepo.username"), + Pwd: viper.GetString("bkrepo.password"), + EndPointUrl: viper.GetString("bkrepo.endpoint_url"), + } +} + +// BkRepoConfig 蓝鲸介质中心 +type BkRepoConfig struct { + Project string `yaml:"project"` + PublicBucket string `yaml:"publicBucket"` + User string `yaml:"user"` + Pwd string `yaml:"pwd"` + EndPointUrl string `yaml:"endpointUrl"` +} diff --git a/dbm-services/mysql/db-partition/model/init_env.go b/dbm-services/mysql/db-partition/model/init_env.go index 5badeb8199..26ee8f8389 100644 --- a/dbm-services/mysql/db-partition/model/init_env.go +++ b/dbm-services/mysql/db-partition/model/init_env.go @@ -22,6 +22,8 @@ func InitEnv() { viper.BindEnv("listen_address", "LISTEN_ADDRESS") viper.BindEnv("cron.timing_hour", "CRON_TIMING_HOUR") viper.BindEnv("cron.retry_hour", "CRON_RETRY_HOUR") + viper.BindEnv("alarm.timezone", "ALARM_TIMEZONE") + viper.BindEnv("alarm.hour", "ALARM_HOUR") 
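	// Illustration with hypothetical values (not taken from this patch): given
	// ALARM_TIMEZONE=UTC+8 and ALARM_HOUR=10, RegisterCron in service/cron.go
	// below builds alarmCron as "02 10 * * * " and registers
	// PartitionJob{CronType: Alarm, ZoneOffset: 8, ZoneName: "UTC+8", Hour: "10"}
	// on the UTC+8 cron instance, so the platform alarm check fires daily at 10:02.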
viper.BindEnv("db_remote_service", "DB_REMOTE_SERVICE")
 	viper.BindEnv("db_meta_service", "DB_META_SERVICE")
 
@@ -45,6 +47,13 @@ func InitEnv() {
 	viper.BindEnv("log.max_age", "LOG_MAX_AGE")
 	viper.BindEnv("log.max_backups", "LOG_MAX_BACKUPS")
 
+	// bkrepo
+	viper.BindEnv("bkrepo.project", "BKREPO_PROJECT")
+	viper.BindEnv("bkrepo.public_bucket", "BKREPO_PUBLIC_BUCKET")
+	viper.BindEnv("bkrepo.username", "BKREPO_USERNAME")
+	viper.BindEnv("bkrepo.password", "BKREPO_PASSWORD")
+	viper.BindEnv("bkrepo.endpoint_url", "BKREPO_ENDPOINT_URL")
+
 	flag.Bool("migrate", false, "run migrate to databases, not exit.")
 	viper.BindPFlags(flag.CommandLine)
diff --git a/dbm-services/mysql/db-partition/monitor/monitor.go b/dbm-services/mysql/db-partition/monitor/monitor.go
index 2eb4ebb726..6ae2ccbe3e 100644
--- a/dbm-services/mysql/db-partition/monitor/monitor.go
+++ b/dbm-services/mysql/db-partition/monitor/monitor.go
@@ -14,9 +14,6 @@ import (
 	"github.com/spf13/viper"
 )
 
-// PartitionEvent TODO
-const PartitionEvent = "partition"
-
 // PartitionDeveloperEvent TODO
 const PartitionDeveloperEvent = "partition_dev"
 
@@ -24,7 +21,7 @@ const PartitionCron = "partition_cron"
 
 // SendEvent 发送自定义监控事件
-func SendEvent(eventName string, dimension map[string]interface{}, content string, serverIp string) {
+func SendEvent(dimension map[string]interface{}, content string, serverIp string) {
 	l, _ := time.LoadLocation("Local")
 	body := eventBody{
 		commonBody: commonBody{
@@ -33,7 +30,7 @@ func SendEvent(eventName string, dimension map[string]interface{}, content strin
 		},
 		Data: []eventData{
 			{
-				EventName: eventName,
+				EventName: PartitionDeveloperEvent,
 				Event: map[string]interface{}{
 					"content": content,
 				},
@@ -57,32 +54,20 @@ func SendEvent(eventName string, dimension map[string]interface{}, content strin
 }
 
 // NewDeveloperEventDimension 构建自定监控事件的维度,发送给平台管理员
-func NewDeveloperEventDimension(serverIp string) map[string]interface{} {
+func NewDeveloperEventDimension(serverIp string, domain string) map[string]interface{} {
 	dimension := make(map[string]interface{})
 	dimension["appid"] = viper.GetString("dba.bk_biz_id")
 	dimension["bk_biz_id"] = viper.GetString("dba.bk_biz_id")
 	dimension["bk_cloud_id"] = 0
-	dimension["cluster_domain"] = PartitionCron
-	dimension["server_ip"] = serverIp
-	dimension["machine_type"] = PartitionCron
-	return dimension
-}
-
-// NewPartitionEventDimension 构建自定监控事件的维度,发送给业务的dba
-func NewPartitionEventDimension(bkBizId int, dbAppAbbr string, bkBizName string, bkCloudId int, domain string) map[string]interface{} {
-	dimension := make(map[string]interface{})
-	dimension["appid"] = bkBizId
-	dimension["bk_biz_id"] = bkBizId
-	dimension["db_app_abbr"] = dbAppAbbr
-	dimension["bk_biz_name"] = bkBizName
-	dimension["bk_cloud_id"] = bkCloudId
 	dimension["cluster_domain"] = domain
+	dimension["machine_type"] = domain
+	dimension["server_ip"] = serverIp
 	return dimension
 }
 
 // TestSendEvent 测试监控上报链路
 func TestSendEvent(dataId int, token string, serviceHost string) error {
-	dimension := NewDeveloperEventDimension("127.0.0.1")
+	dimension := NewDeveloperEventDimension("127.0.0.1", PartitionCron)
 	l, _ := time.LoadLocation("Local")
 	body := eventBody{
diff --git a/dbm-services/mysql/db-partition/service/bk_repo_service.go b/dbm-services/mysql/db-partition/service/bk_repo_service.go
new file mode 100644
index 0000000000..fceb757d7a
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/bk_repo_service.go
@@ -0,0 +1,76 @@
+package service
+
+import (
+	"bytes"
+	"dbm-services/mysql/db-partition/model"
+	"encoding/json"
+	"fmt"
+	"io"
+	"log/slog"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"os"
+	"path"
+)
+
+// UploadDirectToBkRepo uploads a file to the BlueKing artifact repository (bkrepo)
+func UploadDirectToBkRepo(filename string) (*BkRepoRespone, error) {
+	// the upload path must include the file name
+	targetURL, err := url.JoinPath(model.BkRepo.EndPointUrl,
+		path.Join("generic", model.BkRepo.Project, model.BkRepo.PublicBucket, "mysql", "partition", filename))
+	if err != nil {
+		slog.Error("get url fail")
+		return nil, err
+	}
+	slog.Info(fmt.Sprintf("start upload files from %s to %s", filename, targetURL))
+	bodyBuf := bytes.NewBufferString("")
+	bodyWriter := multipart.NewWriter(bodyBuf)
+	fh, err := os.Open(filename)
+	if err != nil {
+		slog.Error("opening file error", "err", err)
+		return nil, err
+	}
+	boundary := bodyWriter.Boundary()
+	closeBuf := bytes.NewBufferString("")
+
+	requestReader := io.MultiReader(bodyBuf, fh, closeBuf)
+	fi, err := fh.Stat()
+	if err != nil {
+		slog.Error("stating file error", "file", filename, "err", err)
+		return nil, err
+	}
+	req, err := http.NewRequest("PUT", targetURL, requestReader)
+	if err != nil {
+		return nil, err
+	}
+	req.SetBasicAuth(model.BkRepo.User, model.BkRepo.Pwd)
+	// Set headers for multipart, and Content Length
+	req.Header.Set("Content-Type", "multipart/form-data; boundary="+boundary)
+	// whether an existing file may be overwritten; bkrepo defaults to false
+	req.Header.Set("X-BKREPO-OVERWRITE", "True")
+	// retention period of the uploaded file
+	req.Header.Set("X-BKREPO-EXPIRES", "15")
+	req.ContentLength = fi.Size() + int64(bodyBuf.Len()) + int64(closeBuf.Len())
+	resp, err := http.DefaultClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+	if resp.StatusCode != http.StatusOK {
+		return nil, fmt.Errorf("返回码非200 %d", resp.StatusCode)
+	}
+	var baseResp BkRepoRespone
+	if err = json.NewDecoder(resp.Body).Decode(&baseResp); err != nil {
+		return nil, err
+	}
+	return &baseResp, err
+}
+
+// BkRepoRespone the response body returned by the bkrepo API
+type BkRepoRespone struct {
+	Code      int             `json:"code"`
+	Message   string          `json:"message"`
+	Data      json.RawMessage `json:"data"`
+	RequestId string          `json:"request_id"`
+}
diff --git a/dbm-services/mysql/db-partition/service/check_partition.go b/dbm-services/mysql/db-partition/service/check_partition.go
index ef7139b2b6..e7df338aae 100644
--- a/dbm-services/mysql/db-partition/service/check_partition.go
+++ b/dbm-services/mysql/db-partition/service/check_partition.go
@@ -2,25 +2,23 @@ package service
 
 import (
 	"context"
+	"dbm-services/common/go-pubpkg/errno"
+	"dbm-services/mysql/db-partition/model"
 	"errors"
 	"fmt"
 	"log/slog"
-	"strconv"
 	"strings"
 	"sync"
 	"time"
 
-	"dbm-services/common/go-pubpkg/errno"
-	"dbm-services/mysql/db-partition/model"
+	"golang.org/x/time/rate"
 )
 
-// DryRun TODO
+// DryRun generates the partition SQL for preview on the page
 func (m *Checker) DryRun() ([]PartitionObject, error) {
 	slog.Info("do service DryRun")
 	var objects []PartitionObject
-	var sqls []PartitionSql
-	var err error
-	var needPartition bool
+	var errOuter error
 	if m.BkBizId == 0 {
 		return objects, errno.BkBizIdIsEmpty
 	}
@@ -30,6 +28,9 @@ func (m *Checker) DryRun() ([]PartitionObject, error) {
 	if m.BkCloudId == nil {
 		return objects, errno.CloudIdRequired
 	}
+	if m.ConfigId == 0 {
+		return objects, errno.RuleIdNull
+	}
 	var configs []*PartitionConfig
 	var tbName string
 	switch m.ClusterType {
@@ -41,20 +42,11 @@ func (m *Checker) DryRun() ([]PartitionObject, error) {
 		slog.Error(m.ClusterType, "error", errno.NotSupportedClusterType.Error())
 		return objects, errno.NotSupportedClusterType
 	}
-	if m.ConfigId == 0 {
-		err = model.DB.Self.Table(tbName).Where("bk_biz_id = ? and cluster_id = ?", m.BkBizId, m.ClusterId).Scan(&configs).
-			Error
-		if err != nil {
-			slog.Error("msg", fmt.Sprintf("query %s err", tbName), err)
-			return objects, err
-		}
-	} else {
-		err = model.DB.Self.Table(tbName).Where("bk_biz_id = ? and cluster_id = ? and id = ?", m.BkBizId, m.ClusterId,
-			m.ConfigId).Scan(&configs).Error
-		if err != nil {
-			slog.Error("msg", fmt.Sprintf("query %s err", tbName), err)
-			return objects, err
-		}
+	errOuter = model.DB.Self.Table(tbName).Where("bk_biz_id = ? and cluster_id = ? and id = ?", m.BkBizId, m.ClusterId,
+		m.ConfigId).Scan(&configs).Error
+	if errOuter != nil {
+		slog.Error("msg", fmt.Sprintf("query %s err", tbName), errOuter)
+		return objects, errOuter
 	}
 	if len(configs) == 0 {
 		return objects, errno.PartitionConfigNotExisted
 	}
@@ -63,43 +55,65 @@ func (m *Checker) DryRun() ([]PartitionObject, error) {
 	slog.Info(fmt.Sprintf("configs:%v", configs))
 	switch m.ClusterType {
 	case Tendbha, Tendbsingle:
-		newConfigs, err := GetMaster(configs, m.ImmuteDomain, m.ClusterType)
+		ins, err := GetMaster(m.ImmuteDomain, m.ClusterType)
 		if err != nil {
 			slog.Error("msg", "GetClusterMasterError", err)
 			return objects, err
 		}
-		sqls, err = m.CheckPartitionConfigs(newConfigs, "mysql", 1)
+		sqls, _, _, err := CheckPartitionConfigs(configs, "mysql",
+			1, false, Host{Ip: ins.Ip, Port: ins.Port, BkCloudId: ins.BkCloudId})
 		if err != nil {
 			slog.Error("msg", "CheckPartitionConfigs", err)
-			return objects, err
+			return objects, errno.GetPartitionSqlFail.Add(fmt.Sprintf("%s:%d\n%s", ins.Ip,
+				ins.Port, err.Error()))
+		}
+		if len(sqls) == 0 {
+			return objects, errno.NothingToDo
 		}
-		objects = []PartitionObject{{"0.0.0.0", 0, "null", sqls}}
+		objects = append(objects, PartitionObject{Ip: ins.Ip, Port: ins.Port, ShardName: "null",
+			ExecuteObjects: sqls})
 	case Tendbcluster:
-		objects, err = m.CheckSpiderPartitionConfigs(configs)
+		cluster := fmt.Sprintf("%s|%d|%d", m.ImmuteDomain, m.Port, m.BkCloudId)
+		hostNodes, splitCnt, err := GetTendbclusterInstances(cluster)
 		if err != nil {
-			slog.Error("msg", "CheckSpiderPartitionConfigs", err)
+			slog.Error("msg", "GetTendbclusterInstances", err)
 			return objects, err
 		}
+		for _, instances := range hostNodes {
+			for _, ins := range instances {
+				newconfigs := make([]*PartitionConfig, len(configs))
+				for k, v := range configs {
+					newconfig := *v
+					if ins.Wrapper == "mysql" {
+						newconfig.DbLike = fmt.Sprintf("%s_%s", newconfig.DbLike, ins.SplitNum)
+					}
+					newconfigs[k] = &newconfig
+				}
+				sqls, _, _, errInner := CheckPartitionConfigs(newconfigs, ins.Wrapper,
+					splitCnt, false, Host{Ip: ins.Ip, Port: ins.Port, BkCloudId: int64(ins.Cloud)})
+				if errInner != nil {
+					slog.Error("msg", "CheckPartitionConfigs", errInner)
+					return objects, errno.GetPartitionSqlFail.Add(fmt.Sprintf("%s:%d\n%s", ins.Ip,
+						ins.Port, errInner.Error()))
+				}
+				if len(sqls) == 0 {
+					continue
+				}
+				objects = append(objects, PartitionObject{Ip: ins.Ip, Port: ins.Port, ShardName: ins.ServerName,
+					ExecuteObjects: sqls})
+			}
+		}
+		if len(objects) == 0 {
+			return objects, errno.NothingToDo
+		}
 	default:
 		slog.Error(m.ClusterType, "error", errors.New("not supported db type"))
 		return objects, errno.NotSupportedClusterType
 	}
-
-	for _, item := range objects {
-		for _, execute := range item.ExecuteObjects {
-			// 集群没有需要执行的分区语句并且在获取分区语句时没有错误,则不生成单据
-			if len(execute.AddPartition) != 0 || len(execute.DropPartition) != 0 || len(execute.InitPartition) != 0 {
-				needPartition = true
-				break
-			}
-		}
-	}
-	if needPartition == false {
-		return objects, errno.NothingToDo
-	}
 	return objects, nil
 }
 
+/*
 // CheckSpiderPartitionConfigs TODO
 func (m *Checker) CheckSpiderPartitionConfigs(configs []*PartitionConfig) ([]PartitionObject, error) {
 	fmt.Printf("do CheckSpiderPartitionConfigs")
@@ -123,7 +137,7 @@ func (m *Checker) CheckSpiderPartitionConfigs(configs []*PartitionConfig) ([]Par
 			}
 			newconfigs[k] = &newconfig
 		}
-		execute, err := m.CheckPartitionConfigs(newconfigs, item["WRAPPER"].(string), splitCnt)
+		execute, err := CheckPartitionConfigs(newconfigs, item["WRAPPER"].(string), splitCnt, m.FromCron)
 		if err != nil {
 			slog.Error("msg", "CheckPartitionConfigs", err)
 			return all, errno.GetPartitionSqlFail.Add(fmt.Sprintf("spit%s %s:%s\n%s", item["SPLIT_NUM"], item["HOST"],
@@ -131,25 +145,39 @@ func (m *Checker) CheckSpiderPartitionConfigs(configs []*PartitionConfig) ([]Par
 		}
 		all = append(all, PartitionObject{host, port, item["SERVER_NAME"].(string), execute})
 	}
-	return all, nil
 }
+*/
 
 // CheckPartitionConfigs TODO
-func (m *Checker) CheckPartitionConfigs(configs []*PartitionConfig, dbtype string, splitCnt int) ([]PartitionSql,
-	error) {
+func CheckPartitionConfigs(configs []*PartitionConfig, dbtype string, splitCnt int, fromCron bool, host Host) ([]PartitionSql,
+	[]PartitionConfig, []PartitionConfig, error) {
 	fmt.Printf("do CheckPartitionConfigs")
 	var errMsg Messages
 	sqlSet := PartitionSqlSet{}
+	nothingToDoSet := ConfigSet{}
 	wg := sync.WaitGroup{}
-	tokenBucket := make(chan int, 10)
+	limit := rate.Every(time.Millisecond * 200) // QPS:5
+	burst := 10                                 // bucket capacity 10
+	limiter := rate.NewLimiter(limit, burst)
 	for _, config := range configs {
 		wg.Add(1)
-		tokenBucket <- 0
 		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
 		go func(config *PartitionConfig) {
+			err := limiter.Wait(context.Background())
+			if err != nil {
+				errMsg.mu.Lock()
+				errMsg.list = append(errMsg.list, err.Error())
+				errMsg.mu.Unlock()
+				// release the WaitGroup and the timeout context on this early
+				// return, otherwise wg.Wait() below blocks forever
+				wg.Done()
+				cancel()
+				return
+			}
 			slog.Info(fmt.Sprintf("%s:%v", "CheckOnePartitionConfig", config))
-			err := m.CheckOnePartitionConfig(ctx, cancel, *config, &wg, &tokenBucket, &sqlSet, dbtype, splitCnt)
+			err = CheckOnePartitionConfig(ctx, cancel, *config, &wg, &sqlSet, &nothingToDoSet, dbtype, splitCnt,
+				fromCron, host)
 			if err != nil {
 				errMsg.mu.Lock()
 				errMsg.list = append(errMsg.list, err.Error())
@@ -158,22 +182,21 @@
 		}(config)
 	}
 	wg.Wait()
-	close(tokenBucket)
 	if len(errMsg.list) > 0 {
-		return sqlSet.PartitionSqls, fmt.Errorf(strings.Join(errMsg.list, "\n"))
+		return sqlSet.PartitionSqls, sqlSet.Configs, nothingToDoSet.Configs, fmt.Errorf(strings.Join(errMsg.list, "\n"))
 	}
-	return sqlSet.PartitionSqls, nil
+	return sqlSet.PartitionSqls, sqlSet.Configs, nothingToDoSet.Configs, nil
 }
 
 // CheckOnePartitionConfig TODO
-func (m *Checker) CheckOnePartitionConfig(ctx context.Context, cancel context.CancelFunc, config PartitionConfig,
-	wg *sync.WaitGroup, tokenBucket *chan int, sqlSet *PartitionSqlSet, dbtype string, splitCnt int) error {
+func CheckOnePartitionConfig(ctx context.Context, cancel context.CancelFunc, config PartitionConfig,
+	wg *sync.WaitGroup, sqlSet *PartitionSqlSet, nothingToDoSet *ConfigSet,
+	dbtype string, splitCnt int, fromCron bool, host Host) error {
 	fmt.Printf("do CheckOnePartitionConfig")
 	var addSql, dropSql []string
 	var err error
 	var initSql []InitSql
 	defer func() {
-		<-*tokenBucket
 		wg.Done()
 		cancel()
 	}()
@@ -184,15 +207,24 @@
 	defer func() {
 		finish <- 1
 	}()
-		initSql, addSql, dropSql, err =
config.GetPartitionDbLikeTbLike(dbtype, splitCnt, m.FromCron) + initSql, addSql, dropSql, err = config.GetPartitionDbLikeTbLike(dbtype, splitCnt, fromCron, host) if err != nil { errorChan <- err return } - sqlSet.Mu.Lock() - sqlSet.PartitionSqls = append(sqlSet.PartitionSqls, PartitionSql{config.ID, config.DbLike, config.TbLike, initSql, - addSql, dropSql}) - sqlSet.Mu.Unlock() + + if len(addSql) != 0 || len(dropSql) != 0 || len(initSql) != 0 { + sqlSet.Mu.Lock() + sqlSet.PartitionSqls = append(sqlSet.PartitionSqls, PartitionSql{config.ID, config.DbLike, config.TbLike, initSql, + addSql, dropSql}) + sqlSet.Configs = append(sqlSet.Configs, config) + sqlSet.Mu.Unlock() + } else { + // 集群没有需要执行的分区语句并且在获取分区语句时没有错误 + nothingToDoSet.Mu.Lock() + nothingToDoSet.Configs = append(nothingToDoSet.Configs, config) + nothingToDoSet.Mu.Unlock() + } return }() diff --git a/dbm-services/mysql/db-partition/service/check_partition_base_func.go b/dbm-services/mysql/db-partition/service/check_partition_base_func.go index 1b1c8d54d2..6359be6625 100644 --- a/dbm-services/mysql/db-partition/service/check_partition_base_func.go +++ b/dbm-services/mysql/db-partition/service/check_partition_base_func.go @@ -12,13 +12,13 @@ import ( "dbm-services/common/go-pubpkg/errno" "dbm-services/mysql/db-partition/model" - "dbm-services/mysql/db-partition/monitor" "github.com/spf13/viper" ) // GetPartitionDbLikeTbLike TODO -func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt int, fromCron bool) ([]InitSql, []string, []string, +func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt int, fromCron bool, host Host) ( + []InitSql, []string, []string, error) { var addSqls, dropSqls, errs Messages var initSqls InitMessages @@ -28,7 +28,7 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt dropSqls.list = []string{} errs.list = []string{} - tbs, errOuter := config.GetDbTableInfo(fromCron) + tbs, errOuter := config.GetDbTableInfo(fromCron, host) if errOuter != nil { slog.Error("GetDbTableInfo error", errOuter) return nil, nil, nil, fmt.Errorf("get database and table info failed:%s", errOuter.Error()) @@ -49,7 +49,7 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt wg.Done() }() if tb.Partitioned { - sql, err = tb.GetAddPartitionSql() + sql, err = tb.GetAddPartitionSql(host) if err != nil { slog.Error("msg", "GetAddPartitionSql error", err) AddString(&errs, err.Error()) @@ -59,7 +59,7 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt if tb.Phase == online { // 启用的分区规则,会执行删除历史分区 // 禁用的分区规则,会新增分区,但是不会删除历史分区 - sql, err = tb.GetDropPartitionSql() + sql, err = tb.GetDropPartitionSql(host) if err != nil { slog.Error("msg", "GetDropPartitionSql error", err) AddString(&errs, err.Error()) @@ -68,7 +68,7 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt AddString(&dropSqls, sql) } } else { - sql, needSize, err = tb.GetInitPartitionSql(dbtype, splitCnt) + sql, needSize, err = tb.GetInitPartitionSql(dbtype, splitCnt, host) if err != nil { slog.Error("msg", "GetInitPartitionSql error", err) AddString(&errs, err.Error()) @@ -91,8 +91,8 @@ func (config *PartitionConfig) GetPartitionDbLikeTbLike(dbtype string, splitCnt } // GetDbTableInfo TODO -func (config *PartitionConfig) GetDbTableInfo(fromCron bool) (ptlist []ConfigDetail, err error) { - address := fmt.Sprintf("%s:%d", config.ImmuteDomain, config.Port) +func (config *PartitionConfig) GetDbTableInfo(fromCron bool, host 
Host) (ptlist []ConfigDetail, err error) { + address := fmt.Sprintf("%s:%d", host.Ip, host.Port) slog.Info(fmt.Sprintf("get real partition info from (%s/%s,%s)", address, config.DbLike, config.TbLike)) var output oneAddressResult @@ -100,7 +100,8 @@ func (config *PartitionConfig) GetDbTableInfo(fromCron bool) (ptlist []ConfigDet `select TABLE_SCHEMA as TABLE_SCHEMA,TABLE_NAME as TABLE_NAME,CREATE_OPTIONS as CREATE_OPTIONS `+ ` from information_schema.tables where TABLE_SCHEMA like '%s' and TABLE_NAME like '%s';`, config.DbLike, config.TbLike) - var queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, config.BkCloudId} + var queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, + int(host.BkCloudId)} output, err = OneAddressExecuteSql(queryRequest) if err != nil { slog.Error("GetDbTableInfo", sql, err.Error()) @@ -118,7 +119,7 @@ func (config *PartitionConfig) GetDbTableInfo(fromCron bool) (ptlist []ConfigDet ` CONSTRAINT_TYPE in ('UNIQUE','PRIMARY KEY');`, config.DbLike, config.TbLike) queryRequest = QueryRequest{[]string{address}, []string{uniqueKeySql}, true, - 30, config.BkCloudId} + 30, int(host.BkCloudId)} hasUniqueKey, err := OneAddressExecuteSql(queryRequest) if err != nil { slog.Error("get", sql, err.Error()) @@ -138,7 +139,8 @@ func (config *PartitionConfig) GetDbTableInfo(fromCron bool) (ptlist []ConfigDet " information_schema.PARTITIONS where TABLE_SCHEMA like '%s' and TABLE_NAME like '%s' "+ " order by PARTITION_DESCRIPTION asc limit 2;", db, tb) - queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, config.BkCloudId} + queryRequest = QueryRequest{[]string{address}, []string{sql}, true, 30, + int(host.BkCloudId)} output, err = OneAddressExecuteSql(queryRequest) if err != nil { slog.Error("GetDbTableInfo", sql, err.Error()) @@ -282,10 +284,10 @@ func CalculateInterval(firstName, secondName string, interval int) (bool, error) } // GetDropPartitionSql 生成删除分区的sql -func (m *ConfigDetail) GetDropPartitionSql() (string, error) { +func (m *ConfigDetail) GetDropPartitionSql(host Host) (string, error) { var sql, dropSql, fx string reserve := m.ReservedPartition * m.PartitionTimeInterval - address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port) + address := fmt.Sprintf("%s:%d", host.Ip, host.Port) base0 := fmt.Sprintf(`select PARTITION_NAME as PARTITION_NAME from INFORMATION_SCHEMA.PARTITIONS `+ `where TABLE_SCHEMA='%s' and TABLE_NAME='%s' and PARTITION_DESCRIPTION<`, m.DbName, m.TbName) base1 := "order by PARTITION_DESCRIPTION asc;" @@ -308,7 +310,7 @@ func (m *ConfigDetail) GetDropPartitionSql() (string, error) { } sql = fmt.Sprintf("%s %s %s", base0, fx, base1) var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{sql}, Force: true, QueryTimeout: 30, - BkCloudId: m.BkCloudId} + BkCloudId: int(host.BkCloudId)} output, err := OneAddressExecuteSql(queryRequest) if err != nil { return dropSql, err @@ -332,7 +334,7 @@ func (m *ConfigDetail) GetDropPartitionSql() (string, error) { } // GetInitPartitionSql 首次分区,自动分区 -func (m *ConfigDetail) GetInitPartitionSql(dbtype string, splitCnt int) (string, int, error) { +func (m *ConfigDetail) GetInitPartitionSql(dbtype string, splitCnt int, host Host) (string, int, error) { var sqlPartitionDesc []string var pkey, descKey, descFormat, initSql string var needSize, diff int @@ -397,7 +399,7 @@ func (m *ConfigDetail) GetInitPartitionSql(dbtype string, splitCnt int) (string, initSql = fmt.Sprintf("alter table `%s`.`%s` partition by %s (%s)", m.DbName, m.TbName, pkey, 
strings.Join(sqlPartitionDesc, ",")) } else { - needSize, err = m.CheckTableSize(splitCnt) + needSize, err = m.CheckTableSize(splitCnt, host) if err != nil { return initSql, needSize, err } @@ -418,9 +420,9 @@ func (m *ConfigDetail) GetInitPartitionSql(dbtype string, splitCnt int) (string, } // CheckTableSize TODO -func (m *ConfigDetail) CheckTableSize(splitCnt int) (int, error) { +func (m *ConfigDetail) CheckTableSize(splitCnt int, host Host) (int, error) { var needSize int - address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port) + address := fmt.Sprintf("%s:%d", host.Ip, host.Port) sql := fmt.Sprintf( "select TABLE_ROWS,(DATA_LENGTH+INDEX_LENGTH) as BYTES from information_schema.tables where TABLE_SCHEMA='%s' and TABLE_NAME='%s'", m.DbName, m.TbName) var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{sql}, Force: true, QueryTimeout: 30, @@ -437,17 +439,18 @@ func (m *ConfigDetail) CheckTableSize(splitCnt int) (int, error) { } else { return needSize, fmt.Errorf( "table %s.%s is not a partition table,and can not do auto alter partition, "+ - "because large than 100MB size or large than 1000000 rows", m.DbName, m.TbName) + "because large than %d size or large than %d rows", m.DbName, m.TbName, + viper.GetInt("pt.max_size"), viper.GetInt("pt.max_rows")) } } // GetAddPartitionSql 生成增加分区的sql -func (m *ConfigDetail) GetAddPartitionSql() (string, error) { +func (m *ConfigDetail) GetAddPartitionSql(host Host) (string, error) { var vsql, addSql, descKey, name, fx string var wantedDesc, wantedName, wantedDescIfOld, wantedNameIfOld string var diff, desc int var begin int - address := fmt.Sprintf("%s:%d", m.ImmuteDomain, m.Port) + address := fmt.Sprintf("%s:%d", host.Ip, host.Port) switch m.PartitionType { case 0: diff = DiffOneDay @@ -501,7 +504,7 @@ func (m *ConfigDetail) GetAddPartitionSql() (string, error) { "select count(*) as COUNT from INFORMATION_SCHEMA.PARTITIONS where TABLE_SCHEMA='%s' and TABLE_NAME='%s' "+ "and partition_description>= %s", m.DbName, m.TbName, fx) var queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30, - BkCloudId: m.BkCloudId} + BkCloudId: int(host.BkCloudId)} output, err := OneAddressExecuteSql(queryRequest) if err != nil { return addSql, err @@ -527,7 +530,7 @@ func (m *ConfigDetail) GetAddPartitionSql() (string, error) { begin = -1 vsql = fmt.Sprintf(`select %s %s from INFORMATION_SCHEMA.PARTITIONS limit 1;`, wantedDescIfOld, wantedNameIfOld) queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{vsql}, Force: true, QueryTimeout: 30, - BkCloudId: m.BkCloudId} + BkCloudId: int(host.BkCloudId)} output, err = OneAddressExecuteSql(queryRequest) if err != nil { return addSql, err @@ -608,100 +611,33 @@ func (m *ConfigDetail) NewPartitionNameDescType4(begin int, need int, name strin return sql, nil } -// GetSpiderBackends TODO -func GetSpiderBackends(address string, bkCloudId int) (tableDataType, int, error) { - var splitCnt int - var tdbctlPrimary string - // 查询tdbctl - dbctlSql := "select HOST,PORT,server_name as SPLIT_NUM, SERVER_NAME, WRAPPER from mysql.servers " + - "where wrapper='TDBCTL' and server_name like 'TDBCTL%' ;" - getTdbctlPrimary := "tdbctl get primary;" - queryRequest := QueryRequest{Addresses: []string{address}, Cmds: []string{dbctlSql}, Force: true, QueryTimeout: 30, - BkCloudId: bkCloudId} - output, err := OneAddressExecuteSql(queryRequest) - if err != nil { - return nil, splitCnt, fmt.Errorf("execute [%s] get spider info error: %s", dbctlSql, 
err.Error()) - } else if len(output.CmdResults[0].TableData) == 0 { - return nil, splitCnt, fmt.Errorf("no spider tdbctl found") - } - - // 查询tdbctl主节点 - for _, item := range output.CmdResults[0].TableData { - tdbctl := fmt.Sprintf("%s:%s", item["HOST"].(string), item["PORT"].(string)) - queryRequest = QueryRequest{Addresses: []string{tdbctl}, Cmds: []string{getTdbctlPrimary}, Force: true, - QueryTimeout: 30, BkCloudId: bkCloudId} - primary, err := OneAddressExecuteSql(queryRequest) - if err != nil { - slog.Warn(fmt.Sprintf("execute [%s] error: %s", getTdbctlPrimary, err.Error())) - continue - } - if len(primary.CmdResults[0].TableData) == 0 { - slog.Error(fmt.Sprintf("execute [%s] nothing return", getTdbctlPrimary)) - return nil, splitCnt, fmt.Errorf("execute [%s] nothing return", getTdbctlPrimary) - } - slog.Info("data:", primary.CmdResults[0].TableData) - tdbctlPrimary = primary.CmdResults[0].TableData[0]["SERVER_NAME"].(string) - break - } - if tdbctlPrimary == "" { - slog.Error(fmt.Sprintf("execute [%s] SERVER_NAME is null", getTdbctlPrimary)) - return nil, splitCnt, fmt.Errorf("execute [%s] SERVER_NAME is null", getTdbctlPrimary) - } - // 查询remote master各分片实例和tdbctl主节点 - splitSql := fmt.Sprintf("select HOST,PORT,replace(server_name,'SPT','') as SPLIT_NUM, SERVER_NAME, WRAPPER "+ - "from mysql.servers where wrapper in ('mysql','TDBCTL') and "+ - "(server_name like 'SPT%%' or server_name like '%s')", tdbctlPrimary) - queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{splitSql}, Force: true, QueryTimeout: 30, - BkCloudId: bkCloudId} - output, err = OneAddressExecuteSql(queryRequest) - if err != nil { - return nil, splitCnt, fmt.Errorf("execute [%s] get spider remote and tdbctl master error: %s", splitSql, err.Error()) - } - // 查询一台remote机器上有多少个实例,用于评估存储空间 - cntSql := "select count(*) as COUNT from mysql.servers where WRAPPER='mysql' and " + - "SERVER_NAME like 'SPT%' group by host order by 1 desc limit 1;" - queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{cntSql}, Force: true, QueryTimeout: 30, - BkCloudId: bkCloudId} - output1, err := OneAddressExecuteSql(queryRequest) - if err != nil { - return nil, splitCnt, fmt.Errorf("execute [%s] get spider split count error: %s", cntSql, err.Error()) +// CreatePartitionTicket 创建分区定时任务的单据 +func CreatePartitionTicket(flows []Info, ClusterType string, domain string, vdate string) error { + var ticketType string + var ticket Ticket + if ClusterType == Tendbha { + ticketType = "MYSQL_PARTITION_CRON" + } else if ClusterType == Tendbcluster { + ticketType = "TENDBCLUSTER_PARTITION_CRON" + } else { + return errno.NotSupportedClusterType } - splitCnt, _ = strconv.Atoi(output1.CmdResults[0].TableData[0]["COUNT"].(string)) - return output.CmdResults[0].TableData, splitCnt, nil -} - -// CreatePartitionTicket TODO -func CreatePartitionTicket(check Checker, objects []PartitionObject, zoneOffset int, date string, scheduler string) { - zone := fmt.Sprintf("%+03d:00", zoneOffset) - ticketType := "MYSQL_PARTITION" - if check.ClusterType == Tendbcluster { - ticketType = "TENDBCLUSTER_PARTITION" - } - ticket := Ticket{BkBizId: check.BkBizId, DbAppAbbr: check.DbAppAbbr, BkBizName: check.BkBizName, + ticket = Ticket{BkBizId: viper.GetInt("dba.bk_biz_id"), TicketType: ticketType, Remark: "auto partition", IgnoreDuplication: true, - Details: Detail{Infos: []Info{{check.ConfigId, check.ClusterId, check.ImmuteDomain, - *check.BkCloudId, objects}}}} + Details: Detail{Infos: flows}, CronDate: vdate, ImmuteDomain: domain} 
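	// Illustration with hypothetical values: for a tendbha cluster this builds
	//   Ticket{BkBizId: <dba bk_biz_id>, TicketType: "MYSQL_PARTITION_CRON",
	//       Remark: "auto partition", IgnoreDuplication: true,
	//       Details: Detail{Infos: flows}, CronDate: "20240524",
	//       ImmuteDomain: "example.db.domain"}
	// which CreateDbmTicket below submits to the dbm ticket service.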
slog.Info("msg", "ticket info", fmt.Sprintf("%v", ticket)) - id, err := CreateDbmTicket(ticket) + _, err := CreateDbmTicket(ticket) if err != nil { - dimension := monitor.NewPartitionEventDimension(check.BkBizId, check.DbAppAbbr, check.BkBizName, - *check.BkCloudId, check.ImmuteDomain) - content := fmt.Sprintf("partition error. create ticket fail: %s", err.Error()) - monitor.SendEvent(monitor.PartitionEvent, dimension, content, "127.0.0.1") - slog.Error("msg", fmt.Sprintf("create ticket fail: %v", ticket), err) - _ = AddLog(check.ConfigId, check.BkBizId, check.ClusterId, *check.BkCloudId, - 0, check.ImmuteDomain, zone, date, scheduler, - content, CheckFailed, check.ClusterType) - return - } - _ = AddLog(check.ConfigId, check.BkBizId, check.ClusterId, *check.BkCloudId, id, check.ImmuteDomain, - zone, date, scheduler, "", ExecuteAsynchronous, check.ClusterType) + return err + } + return nil } // NeedPartition 获取需要实施的分区规则 -func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate string) ([]*Checker, error) { +func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate string) ([]*PartitionConfig, error) { var configTb, logTb string - var all, doNothing []*Checker + var doNothing []*Checker + switch clusterType { case Tendbha, Tendbsingle: configTb = MysqlPartitionConfig @@ -710,41 +646,36 @@ func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate configTb = SpiderPartitionConfig logTb = SpiderPartitionCronLogTable default: - return nil, errors.New("不支持的db类型") + return nil, errno.NotSupportedClusterType } vzone := fmt.Sprintf("%+03d:00", zoneOffset) // 集群被offline时,其分区规则也被禁用,规则不会被定时任务执行 - vsql := fmt.Sprintf( - "select id as config_id, bk_biz_id, db_app_abbr, bk_biz_name, "+ - "cluster_id, immute_domain, port, bk_cloud_id,"+ - " '%s' as cluster_type from `%s`.`%s` where time_zone='%s' and phase in ('%s','%s') order by 2,3;", - clusterType, viper.GetString("db.name"), configTb, vzone, online, offline) - slog.Info(vsql) - err := model.DB.Self.Raw(vsql).Scan(&all).Error + var all, need []*PartitionConfig + err := model.DB.Self.Table(configTb).Where("time_zone = ? and phase in (?,?)", vzone, online, offline).Scan(&all). 
+ Error if err != nil { - slog.Error(vsql, "execute err", err) + slog.Error("msg", fmt.Sprintf("query %s err", configTb), err) return nil, err } - slog.Info("all", all) if cronType == "daily" { return all, nil } - vsql = fmt.Sprintf("select conf.id as config_id from `%s`.`%s` as conf,"+ + + vsql := fmt.Sprintf("select conf.id as config_id from `%s`.`%s` as conf,"+ "`%s`.`%s` as log where conf.id=log.config_id "+ "and conf.time_zone='%s' and log.cron_date='%s' and log.status like '%s'", viper.GetString("db.name"), configTb, viper.GetString("db.name"), - logTb, vzone, cronDate, CheckSucceeded) + logTb, vzone, cronDate, Success) slog.Info(vsql) err = model.DB.Self.Raw(vsql).Scan(&doNothing).Error if err != nil { slog.Error(vsql, "execute err", err) return nil, err } - var need []*Checker for _, item := range all { retryFlag := true for _, ok := range doNothing { - if (*item).ConfigId == (*ok).ConfigId { + if (*item).ID == (*ok).ConfigId { retryFlag = false break } @@ -756,49 +687,37 @@ func NeedPartition(cronType string, clusterType string, zoneOffset int, cronDate return need, nil } -// GetMaster TODO -func GetMaster(configs []*PartitionConfig, immuteDomain, clusterType string) ([]*PartitionConfig, error) { - newconfigs := make([]*PartitionConfig, len(configs)) - clusterInfo, err := GetCluster(Domain{immuteDomain}, clusterType) +// GetMaster 获取主库 +func GetMaster(immuteDomain, clusterType string) (Host, error) { + var host Host + cluster, err := GetCluster(Domain{immuteDomain}, clusterType) if err != nil { slog.Error("msg", "GetCluster err", err) - return nil, fmt.Errorf("GetCluster err: %s", err.Error()) + return host, fmt.Errorf("GetCluster err: %s", err.Error()) } - var masterIp string - var masterPort int - for _, storage := range clusterInfo.Storages { + for _, storage := range cluster.Storages { if storage.InstanceRole == Orphan || storage.InstanceRole == BackendMaster { - masterIp = storage.IP - masterPort = storage.Port - break + return Host{Ip: storage.IP, Port: storage.Port, BkCloudId: cluster.BkCloudId}, nil } } - - for k, v := range configs { - newconfig := *v - newconfig.ImmuteDomain = masterIp - newconfig.Port = masterPort - newconfigs[k] = &newconfig - } - return newconfigs, nil + return host, fmt.Errorf("not found master") } -// AddLog TODO -func AddLog(configId, bkBizId, clusterId, bkCloudId, ticketId int, immuteDomain, zone, date, scheduler, - info, checkStatus, clusterType string) error { +// AddLogBatch 批量添加日志 +func AddLogBatch(configs []PartitionConfig, date, scheduler, info, status, clusterType string) error { tx := model.DB.Self.Begin() tb := MysqlPartitionCronLogTable if clusterType == Tendbcluster { tb = SpiderPartitionCronLogTable } - log := &PartitionCronLog{ConfigId: configId, BkBizId: bkBizId, ClusterId: clusterId, TicketId: ticketId, - ImmuteDomain: immuteDomain, BkCloudId: bkCloudId, TimeZone: zone, CronDate: date, Scheduler: scheduler, - CheckInfo: info, Status: checkStatus} - err := tx.Debug().Table(tb).Create(log).Error - if err != nil { - tx.Rollback() - slog.Error("msg", "add cron log failed", err) - return err + for _, config := range configs { + log := &PartitionCronLog{ConfigId: config.ID, CronDate: date, Scheduler: scheduler, CheckInfo: info, Status: status} + err := tx.Debug().Table(tb).Create(log).Error + if err != nil { + tx.Rollback() + slog.Error("msg", "add cron log failed", err) + return err + } } tx.Commit() return nil diff --git a/dbm-services/mysql/db-partition/service/check_partition_object.go 
b/dbm-services/mysql/db-partition/service/check_partition_object.go index ab650b5243..1e51eaf0ed 100644 --- a/dbm-services/mysql/db-partition/service/check_partition_object.go +++ b/dbm-services/mysql/db-partition/service/check_partition_object.go @@ -25,7 +25,14 @@ type Checker struct { // PartitionSqlSet 分区语句集合 type PartitionSqlSet struct { Mu sync.RWMutex - PartitionSqls []PartitionSql + PartitionSqls []PartitionSql `json:"partition_sqls"` + Configs []PartitionConfig `json:"configs"` +} + +// ConfigSet 配置集合 +type ConfigSet struct { + Mu sync.RWMutex + Configs []PartitionConfig `json:"configs"` } // PartitionSql 实例ip:port上的分区语句 @@ -43,30 +50,23 @@ type PartitionSql struct { // PartitionCronLog 分区的定时任务日志表 type PartitionCronLog struct { - Id int `json:"id" gorm:"column:id;primary_key;auto_increment"` - BkBizId int `json:"bk_biz_id" gorm:"column:bk_biz_id"` - ClusterId int `json:"cluster_id" gorm:"column:cluster_id"` - ConfigId int `json:"config_id" gorm:"column:config_id"` - TicketId int `json:"ticket_id" gorm:"column:ticket_id"` - ImmuteDomain string `json:"immute_domain" gorm:"column:immute_domain"` - Scheduler string `json:"scheduler" gorm:"column:scheduler"` - BkCloudId int `json:"bk_cloud_id" gorm:"column:bk_cloud_id"` - TimeZone string `json:"time_zone" gorm:"column:time_zone"` - CronDate string `json:"cron_date" grom:"column:cron_date"` - CheckInfo string `json:"check_info" gorm:"column:check_info"` - Status string `json:"status" gorm:"column:status"` + Id int `json:"id" gorm:"column:id;primary_key;auto_increment"` + ConfigId int `json:"config_id" gorm:"column:config_id"` + Scheduler string `json:"scheduler" gorm:"column:scheduler"` + CronDate string `json:"cron_date" grom:"column:cron_date"` + CheckInfo string `json:"check_info" gorm:"column:check_info"` + Status string `json:"status" gorm:"column:status"` } // CreatePartitionCronLog 分区的定时任务日志表,区分集群类型 type CreatePartitionCronLog struct { - PartitionCronLog - ClusterType string `json:"cluster_type"` + Logs []PartitionCronLog `json:"logs"` + ClusterType string `json:"cluster_type"` } // PartitionLog 分区日志 type PartitionLog struct { Id int `json:"id"` - TicketId int `json:"ticket_id" gorm:"column:ticket_id"` ExecuteTime time.Time `json:"execute_time" gorm:"execute_time"` CheckInfo string `json:"check_info" gorm:"check_info"` Status string `json:"status" gorm:"status"` @@ -90,3 +90,9 @@ type Messages struct { mu sync.RWMutex list []string } + +type Host struct { + Ip string `json:"ip"` + Port int `json:"port"` + BkCloudId int64 `json:"bk_cloud_id"` +} diff --git a/dbm-services/mysql/db-partition/cron/cron.go b/dbm-services/mysql/db-partition/service/cron.go similarity index 57% rename from dbm-services/mysql/db-partition/cron/cron.go rename to dbm-services/mysql/db-partition/service/cron.go index 958d1cbd93..b48aeb3fe6 100644 --- a/dbm-services/mysql/db-partition/cron/cron.go +++ b/dbm-services/mysql/db-partition/service/cron.go @@ -1,5 +1,14 @@ -// Package cron TODO -package cron +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package service import ( "errors" @@ -17,11 +26,14 @@ import ( "github.com/spf13/viper" ) +// CronList 定时任务列表 var CronList []*cron.Cron // RegisterCron 注册定时任务 func RegisterCron() ([]*cron.Cron, error) { + // 每日首次执行分区 timingHour := viper.GetString("cron.timing_hour") + // 重试失败的分区规则 retryHour := viper.GetString("cron.retry_hour") if timingHour == "" || retryHour == "" { err := errors.New("cron.partition_hour or cron.retry_hour was not set") @@ -29,6 +41,7 @@ func RegisterCron() ([]*cron.Cron, error) { return CronList, err } timing := fmt.Sprintf("02 %s * * * ", timingHour) + // 可以配置每日多次重试 multiHours, errOuter := util.SplitName(retryHour) if errOuter != nil { errOuter = errors.New("cron.retry_hour format error") @@ -44,6 +57,20 @@ func RegisterCron() ([]*cron.Cron, error) { "UTC+9": 9, "UTC+10": 10, "UTC+11": 11, "UTC+12": 12, "UTC-11": -11, "UTC-10": -10, "UTC-9": -9, "UTC-8": -8, "UTC-7": -7, "UTC-6": -6, "UTC-5": -5, "UTC-4": -4, "UTC-3": -3, "UTC-2": -2, "UTC-1": -1, } + + // 设置告警的时区以及时间 + var alarmTimezone string + if _, isExists := timezone[viper.GetString("alarm.timezone")]; isExists == false { + alarmTimezone = "UTC+8" + } else { + alarmTimezone = viper.GetString("alarm.timezone") + } + alarmHour := viper.GetString("alarm.hour") + if alarmHour == "" { + alarmHour = multiHours[0] + } + alarmCron := fmt.Sprintf("02 %s * * * ", alarmHour) + // 为每个时区的分区规则创建对应的定时任务 for name, offset := range timezone { offetSeconds := offset * 60 * 60 zone := time.FixedZone(name, offetSeconds) @@ -54,6 +81,7 @@ func RegisterCron() ([]*cron.Cron, error) { } else { c = cron.New(cron.WithLocation(zone)) } + // 添加执行分区的定时任务 _, err := c.AddJob(timing, PartitionJob{CronType: Daily, ZoneOffset: offset, ZoneName: name, Hour: timingHour}) if err != nil { slog.Error("msg", "cron add daily job error", err) @@ -67,6 +95,15 @@ func RegisterCron() ([]*cron.Cron, error) { slog.Error("msg", "cron add retry job error", err) return CronList, err } + // 添加分区平台类告警,避免分区定时任务运行异常 + if alarmTimezone == name { + _, err = c.AddJob(alarmCron, PartitionJob{CronType: Alarm, ZoneOffset: offset, ZoneName: name, Hour: alarmHour}) + if err != nil { + slog.Error("msg", "cron add daily job error", err) + return CronList, err + } + } + // 启动分区定时任务 c.Start() slog.Info("msg", zone, c.Entries()) CronList = append(CronList, c) diff --git a/dbm-services/mysql/db-partition/service/cron_basic_func.go b/dbm-services/mysql/db-partition/service/cron_basic_func.go new file mode 100644 index 0000000000..a66bcc3c75 --- /dev/null +++ b/dbm-services/mysql/db-partition/service/cron_basic_func.go @@ -0,0 +1,632 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package service + +import ( + "context" + "dbm-services/mysql/db-partition/util" + "encoding/json" + "fmt" + "log/slog" + "os" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/time/rate" + + "dbm-services/mysql/db-partition/model" + "dbm-services/mysql/db-partition/monitor" +) + +// Run 执行Job +func (m PartitionJob) Run() { + var err error + var key string + offetSeconds := m.ZoneOffset * 60 * 60 + zone := time.FixedZone(m.ZoneName, offetSeconds) + m.CronDate = time.Now().In(zone).Format("20060102") + key = fmt.Sprintf("%s_%s_%d_%s", m.CronType, m.Hour, m.ZoneOffset, m.CronDate) + flag, err := model.Lock(key) + if err != nil { + msg := "partition error. set redis mutual exclusion error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + } else if flag { + m.ExecuteTendbhaPartition() + m.ExecuteTendbclusterPartition() + if m.CronType == Alarm { + // 巡检最近3天执行失败的或者未执行的 + CheckLogSendMonitor(Tendbha, m.CronDate) + CheckLogSendMonitor(Tendbcluster, m.CronDate) + } + } else { + slog.Warn("set redis mutual exclusion fail, do nothing", "key", key) + } +} + +// ExecuteTendbhaPartition 执行tendbha的分区 +func (m PartitionJob) ExecuteTendbhaPartition() { + slog.Info("do ExecuteTendbhaPartition") + timeStr := time.Now().Format(time.RFC3339) + needMysql, errOuter := NeedPartition(m.CronType, Tendbha, m.ZoneOffset, m.CronDate) + if errOuter != nil { + msg := "partition error. 
get need partition list fail"
+		SendMonitor(msg, errOuter)
+		slog.Error("msg", msg, errOuter)
+		return
+	}
+	// 找到distinct业务、distinct集群、集群和业务的所属关系、规则和集群的所属关系
+	var uniqBiz = make(map[int64]struct{})
+	fromCron := true
+	for _, need := range needMysql {
+		if _, isExists := uniqBiz[need.BkBizId]; isExists == false {
+			uniqBiz[need.BkBizId] = struct{}{}
+		}
+	}
+	// 规则属于哪个集群
+	var clusterConfigs = make(map[int][]*PartitionConfig)
+	for _, need := range needMysql {
+		clusterConfigs[need.ClusterId] = append(clusterConfigs[need.ClusterId], need)
+	}
+	// 集群的主库实例
+	master := make(map[int64]Host)
+	// 集群的主库机器
+	uniqHost := make(map[string][]int64)
+	master, uniqHost, errOuter = GetHostAndMaster(uniqBiz)
+	if errOuter != nil {
+		return
+	}
+	slog.Info("msg", "master", master, "host", uniqHost)
+	// 需要下载dbactor的机器
+	var cloudMachineList = make(map[int64][]string)
+	var machineFileName = make(map[string]string)
+	for host, clusters := range uniqHost {
+		tmp := strings.Split(host, "|")
+		ip := tmp[0]
+		cloud, _ := strconv.ParseInt(tmp[1], 10, 64)
+		var objects []PartitionObject
+		for _, cluster := range clusters {
+			port := master[cluster].Port
+			// 获取需要执行的分区语句,哪些分区规则不需要执行
+			sqls, _, nothingToDo, err := CheckPartitionConfigs(clusterConfigs[int(cluster)], "mysql",
+				1, fromCron, Host{Ip: ip, Port: port, BkCloudId: cloud})
+			if err != nil {
+				msg := "get partition sql fail"
+				SendMonitor(msg, err)
+				slog.Error("msg", msg, err)
+				break
+			}
+			if len(sqls) > 0 {
+				slog.Info("msg", "sql", sqls)
+				objects = append(objects, PartitionObject{Ip: ip, Port: port, ShardName: "null",
+					ExecuteObjects: sqls})
+			}
+			// 不需要执行语句的分区规则,也记录到分区日志中
+			if len(nothingToDo) > 0 {
+				err = AddLogBatch(nothingToDo, m.CronDate, Scheduler,
+					"nothing to do", Success, Tendbha)
+				if err != nil {
+					msg := "add log fail"
+					SendMonitor(msg, err)
+					slog.Error("msg", msg, err)
+					break
+				}
+			}
+		}
+		if len(objects) == 0 {
+			continue
+		}
+		slog.Info("msg", "objects", objects)
+		filename := fmt.Sprintf("partition_%s_%s_%s_%s.json", ip, m.CronDate, m.CronType, timeStr)
+		err := UploadObejct(objects, filename)
+		if err != nil {
+			continue
+		}
+		cloudMachineList[cloud] = append(cloudMachineList[cloud], ip)
+		machineFileName[host] = filename
+		slog.Info("machineFileName", "host", host, "filename", filename)
+	}
+	slog.Info("msg", "cloudMachineList", cloudMachineList, "machineFileName", machineFileName)
+	DownLoadFilesCreateTicketByMachine(cloudMachineList, machineFileName, Tendbha, m.CronDate)
+}
+
+// ExecuteTendbclusterPartition 执行tendbcluster的分区
+func (m PartitionJob) ExecuteTendbclusterPartition() {
+	slog.Info("do ExecuteTendbclusterPartition")
+	fromCron := true
+	timeStr := time.Now().Format(time.RFC3339)
+	needMysql, errOuter := NeedPartition(m.CronType, Tendbcluster, m.ZoneOffset, m.CronDate)
+	if errOuter != nil {
+		msg := "partition error. 
get need partition list fail" + SendMonitor(msg, errOuter) + slog.Error("msg", msg, errOuter) + return + } + // 规则属于哪个集群 + var clusterConfigs = make(map[string][]*PartitionConfig) + for _, need := range needMysql { + cluster := fmt.Sprintf("%s|%d|%d", need.ImmuteDomain, need.Port, need.BkCloudId) + clusterConfigs[cluster] = append(clusterConfigs[cluster], need) + } + // 需要下载dbactor的机器 + var machineFileName = make(map[string]string) + var clusterIps = make(map[string][]string) + for cluster := range clusterConfigs { + // 获取集群结构 + hostNodes, splitCnt, err := GetTendbclusterInstances(cluster) + if err != nil { + msg := fmt.Sprintf("get tendbcluster %s nodes error", cluster) + SendMonitor(msg, err) + slog.Error("msg", msg, err) + continue + } + slog.Info("spider struct", "hostNodes", hostNodes, "splitCnt", splitCnt) + var nothing []PartitionConfig + var doSomething []PartitionConfig + var objects []PartitionObject + configs := clusterConfigs[cluster] + for host, instances := range hostNodes { + tmp := strings.Split(host, "|") + ip := tmp[0] + for _, ins := range instances { + newconfigs := make([]*PartitionConfig, len(configs)) + for k, v := range configs { + newconfig := *v + if ins.Wrapper == "mysql" { + newconfig.DbLike = fmt.Sprintf("%s_%s", newconfig.DbLike, ins.SplitNum) + } + newconfigs[k] = &newconfig + } + // 获取一个集群中的一个实例需要执行的所有分区语句 + sqls, do, nothingToDo, errInner := CheckPartitionConfigs(newconfigs, ins.Wrapper, + splitCnt, fromCron, Host{Ip: ins.Ip, Port: ins.Port, BkCloudId: int64(ins.Cloud)}) + if errInner != nil { + msg := "get partition sql fail" + SendMonitor(msg, errInner) + slog.Error("msg", msg, errInner) + break + } + if len(sqls) > 0 { + objects = append(objects, PartitionObject{Ip: ins.Ip, Port: ins.Port, ShardName: ins.ServerName, + ExecuteObjects: sqls}) + } + nothing = append(nothing, nothingToDo...) + doSomething = append(doSomething, do...) 
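// ---- Reviewer sketch (annotation, not part of this patch) ----
// The loop above clones every rule per instance and, for remote "mysql" shard
// nodes, suffixes db_like with the shard number, so a rule written against
// "order_db" targets the physical shard schemas "order_db_0", "order_db_1", ...
// Below is a minimal standalone illustration of that clone-and-suffix step;
// the cfg type and all names in it are hypothetical stand-ins.
package main

import "fmt"

type cfg struct{ DbLike string }

// rewriteForShard returns a per-shard copy of a partition rule pattern.
func rewriteForShard(c cfg, wrapper, splitNum string) cfg {
	if wrapper == "mysql" { // remote storage shard
		c.DbLike = fmt.Sprintf("%s_%s", c.DbLike, splitNum)
	}
	return c // TDBCTL nodes keep the logical db pattern unchanged
}

func main() {
	fmt.Println(rewriteForShard(cfg{DbLike: "order_db"}, "mysql", "0").DbLike) // order_db_0
}
// ---- end sketch ----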
+			}
+			// 需要执行的规则与不需要执行的均记录到日志中,即使记录日志失败,仍然继续执行分区
+			NeedExecuteList(doSomething, nothing, m.CronDate, Tendbcluster)
+			if len(objects) == 0 {
+				continue
+			}
+			filename := fmt.Sprintf("partition_%s_%s_%s_%s.json", ip, m.CronDate, m.CronType, timeStr)
+			err = UploadObejct(objects, filename)
+			if err != nil {
+				continue
+			}
+			clusterIps[cluster] = append(clusterIps[cluster], ip)
+			machineFileName[host] = filename
+			slog.Info("clusterIps", "cluster", cluster, "ip", ip)
+			slog.Info("machineFileName", "host", host, "filename", filename)
+		}
+		slog.Info("msg", "cloudMachineList", clusterIps, "machineFileName", machineFileName)
+	}
+	DownLoadFilesCreateTicketByCluster(clusterIps, machineFileName, Tendbcluster, m.CronDate)
+}
+
+// NeedExecuteList spider集群需要多个节点执行分区规则,只有所有节点均不需要执行sql,才不需要下发分区任务
+func NeedExecuteList(doSomething []PartitionConfig, nothing []PartitionConfig, vdate, clusterType string) {
+	var uniq = make(map[int]struct{})
+	var doList []PartitionConfig
+	var doIds []int
+	var uniqNothing = make(map[int]struct{})
+	var nothingList []PartitionConfig
+
+	for _, item := range doSomething {
+		if _, isExists := uniq[item.ID]; isExists == false {
+			uniq[item.ID] = struct{}{}
+			doList = append(doList, item)
+			doIds = append(doIds, item.ID)
+		}
+	}
+	for _, item := range nothing {
+		if _, isExists := uniqNothing[item.ID]; isExists == false {
+			uniqNothing[item.ID] = struct{}{}
+			if !util.HasElem(item.ID, doIds) {
+				nothingList = append(nothingList, item)
+			}
+		}
+	}
+	err := AddLogBatch(nothingList, vdate, Scheduler, "nothing to do", Success, clusterType)
+	if err != nil {
+		msg := "add log fail"
+		SendMonitor(msg, err)
+		slog.Error("msg", msg, err)
+	}
+}
+
+// UploadObejct 分区结构转换为文件上传到介质中心
+func UploadObejct(objects []PartitionObject, filename string) error {
+	err := ObjectToFile(objects, filename)
+	if err != nil {
+		msg := fmt.Sprintf("object to file %s fail", filename)
+		SendMonitor(msg, err)
+		slog.Error("msg", msg, err)
+		return err
+	}
+	// 上传到介质中心
+	resp, err := UploadDirectToBkRepo(filename)
+	if err != nil {
+		msg := fmt.Sprintf("upload %s to bkrepo error", filename)
+		SendMonitor(msg, err)
+		slog.Error("msg", msg, err)
+		return err
+	}
+	if resp.Code != 0 {
+		msg := fmt.Sprintf(
+			"upload %s to bkrepo response error. response code is %d,response msg:%s,traceId:%s",
+			filename,
+			resp.Code,
+			resp.Message,
+			resp.RequestId,
+		)
+		SendMonitor(msg, fmt.Errorf("upload error"))
+		slog.Error("msg", msg)
+		return fmt.Errorf("upload error")
+	}
+	_ = os.Remove(filename)
+	return nil
+}
+
+// GetHostAndMaster 获取tendbha主实例和主实例所在的机器
+func GetHostAndMaster(uniqBiz map[int64]struct{}) (map[int64]Host, map[string][]int64, error) {
+	var master = make(map[int64]Host)
+	var uniqHost = make(map[string][]int64)
+	for biz := range uniqBiz {
+		clusters, err := GetAllClustersInfo(BkBizId{BkBizId: biz})
+		if err != nil {
+			msg := "get cluster from dbmeta/priv_manager/biz_clusters error"
+			SendMonitor(msg, err)
+			slog.Error("msg", msg, err)
+			return nil, nil, err
+		}
+		for _, cluster := range clusters {
+			if cluster.ClusterType == Tendbha || cluster.ClusterType == Tendbsingle {
+				for _, storage := range cluster.Storages {
+					if storage.InstanceRole == Orphan || storage.InstanceRole == BackendMaster {
+						master[cluster.Id] = Host{Ip: storage.IP, Port: storage.Port, BkCloudId: cluster.BkCloudId}
+						tmp := fmt.Sprintf("%s|%d", storage.IP, cluster.BkCloudId)
+						uniqHost[tmp] = append(uniqHost[tmp], cluster.Id)
+						break
+					}
+				}
+			}
+		}
+	}
+	return master, uniqHost, nil
+}
+
+// GetTendbclusterInstances 获取tendbcluster中的节点
+func GetTendbclusterInstances(cluster string) (map[string][]SpiderNode, int, error) {
+	tmp := strings.Split(cluster, "|")
+	domain := tmp[0]
+	port, _ := strconv.ParseInt(tmp[1], 10, 64)
+	cloud, _ := strconv.Atoi(tmp[2])
+	address := fmt.Sprintf("%s:%d", domain, port)
+	var splitCnt int
+	var tdbctlPrimary string
+	// 查询tdbctl
+	dbctlSql := "select HOST,PORT,server_name as SPLIT_NUM, SERVER_NAME, WRAPPER from mysql.servers " +
+		"where wrapper='TDBCTL' and server_name like 'TDBCTL%' ;"
+	getTdbctlPrimary := "tdbctl get primary;"
+	queryRequest := QueryRequest{Addresses: []string{address}, Cmds: []string{dbctlSql}, Force: true, QueryTimeout: 30,
+		BkCloudId: cloud}
+	output, err := OneAddressExecuteSql(queryRequest)
+	if err != nil {
+		return nil, splitCnt, fmt.Errorf("execute [%s] get spider info error: %s", dbctlSql, err.Error())
+	} else if len(output.CmdResults[0].TableData) == 0 {
+		return nil, splitCnt, fmt.Errorf("no spider tdbctl found")
+	}
+
+	// 查询tdbctl主节点
+	for _, item := range output.CmdResults[0].TableData {
+		tdbctl := fmt.Sprintf("%s:%s", item["HOST"].(string), item["PORT"].(string))
+		queryRequest = QueryRequest{Addresses: []string{tdbctl}, Cmds: []string{getTdbctlPrimary}, Force: true,
+			QueryTimeout: 30, BkCloudId: cloud}
+		primary, err := OneAddressExecuteSql(queryRequest)
+		if err != nil {
+			slog.Warn(fmt.Sprintf("execute [%s] error: %s", getTdbctlPrimary, err.Error()))
+			continue
+		}
+		if len(primary.CmdResults[0].TableData) == 0 {
+			slog.Error(fmt.Sprintf("execute [%s] nothing return", getTdbctlPrimary))
+			return nil, splitCnt, fmt.Errorf("execute [%s] nothing return", getTdbctlPrimary)
+		}
+		slog.Info("data:", primary.CmdResults[0].TableData)
+		tdbctlPrimary = primary.CmdResults[0].TableData[0]["SERVER_NAME"].(string)
+		break
+	}
+	if tdbctlPrimary == "" {
+		slog.Error(fmt.Sprintf("execute [%s] SERVER_NAME is null", getTdbctlPrimary))
+		return nil, splitCnt, fmt.Errorf("execute [%s] SERVER_NAME is null", getTdbctlPrimary)
+	}
+	// 查询remote master各分片实例和tdbctl主节点
+	splitSql := fmt.Sprintf("select HOST,PORT,replace(server_name,'SPT','') as SPLIT_NUM, SERVER_NAME, WRAPPER "+
+		"from mysql.servers where wrapper in ('mysql','TDBCTL') and "+
+		"(server_name like 'SPT%%' or server_name like '%s')", 
tdbctlPrimary) + queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{splitSql}, Force: true, QueryTimeout: 30, + BkCloudId: cloud} + output, err = OneAddressExecuteSql(queryRequest) + if err != nil { + return nil, splitCnt, fmt.Errorf("execute [%s] get spider remote and tdbctl master error: %s", splitSql, err.Error()) + } + // 查询一台remote机器上有多少个实例,用于评估存储空间 + cntSql := "select count(*) as COUNT from mysql.servers where WRAPPER='mysql' and " + + "SERVER_NAME like 'SPT%' group by host order by 1 desc limit 1;" + queryRequest = QueryRequest{Addresses: []string{address}, Cmds: []string{cntSql}, Force: true, QueryTimeout: 30, + BkCloudId: cloud} + output1, err := OneAddressExecuteSql(queryRequest) + if err != nil { + return nil, splitCnt, fmt.Errorf("execute [%s] get spider split count error: %s", cntSql, err.Error()) + } + splitCnt, _ = strconv.Atoi(output1.CmdResults[0].TableData[0]["COUNT"].(string)) + var hostNodes = make(map[string][]SpiderNode) + for _, item := range output.CmdResults[0].TableData { + vip := item["HOST"].(string) + vport, _ := strconv.Atoi(item["PORT"].(string)) + vslitnum := item["SPLIT_NUM"].(string) + wrapper := item["WRAPPER"].(string) + serverName := item["SERVER_NAME"].(string) + vhost := fmt.Sprintf("%s|%d", vip, cloud) + hostNodes[vhost] = append(hostNodes[vhost], SpiderNode{vip, vport, + cloud, vslitnum, wrapper, serverName}) + } + return hostNodes, splitCnt, nil +} + +// DownLoadFilesCreateTicketByMachine tendbha按照机器粒度下载文件、创建分区单据 +func DownLoadFilesCreateTicketByMachine(cloudMachineList map[int64][]string, machineFileName map[string]string, + clusterType string, vdate string) { + var wg sync.WaitGroup + limit := rate.Every(time.Second * 20) + burst := 5 // 桶容量 5 + limiter := rate.NewLimiter(limit, burst) + + for cloud, machines := range cloudMachineList { + tmp := util.SplitArray(machines, 20) + for _, ips := range tmp { + wg.Add(1) + go func(cloud int64, ips []string) { + defer func() { + wg.Done() + }() + err := limiter.Wait(context.Background()) + if err != nil { + msg := "dbmeta/apis/v1/flow/scene/download_dbactor/ error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + // 按照机器下载好dbactor + err = DownloadDbactor(cloud, ips) + // dbactor下载失败,可以继续执行分区的单据,机器上可能已经存在dbactor + if err != nil { + dimension := monitor.NewDeveloperEventDimension(Scheduler, monitor.PartitionCron) + content := fmt.Sprintf("%v download dbactor fail: %s", ips, err.Error()) + monitor.SendEvent(dimension, content, Scheduler) + slog.Error("msg", "download dbactor fail. 
"+ + "dbmeta/apis/v1/flow/scene/download_dbactor/ error", err) + } + var files []Info + for _, ip := range ips { + files = append(files, Info{BkCloudId: cloud, Ip: ip, + FileName: machineFileName[fmt.Sprintf("%s|%d", ip, cloud)]}) + } + // 下载分区文件 + err = DownloadFiles(files) + if err != nil { + msg := "download partition file error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + time.Sleep(60 * time.Second) + // 创建执行分区单据 + err = CreatePartitionTicket(files, clusterType, "", vdate) + if err != nil { + msg := "create ticket error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + }(cloud, ips) + } + } + wg.Wait() +} + +// DownLoadFilesCreateTicketByCluster tendbcluster按照集群粒度下载文件,执行分区规则 +func DownLoadFilesCreateTicketByCluster(clusterIps map[string][]string, machineFileName map[string]string, + clusterType string, vdate string) { + var wg sync.WaitGroup + limit := rate.Every(time.Second * 20) + burst := 5 // 桶容量 5 + limiter := rate.NewLimiter(limit, burst) + + for cluster, machines := range clusterIps { + vcluster := strings.Split(cluster, "|") + domain := vcluster[0] + cloud, _ := strconv.Atoi(vcluster[2]) + wg.Add(1) + var clusterFiles []Info + go func(domain string, cloud int, machines []string) { + defer func() { + wg.Done() + }() + err := limiter.Wait(context.Background()) + if err != nil { + msg := "get token error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + tmp := util.SplitArray(machines, 20) + for _, ips := range tmp { + // 按照机器下载好dbactor + err = DownloadDbactor(int64(cloud), ips) + // dbactor下载失败,可以继续执行分区的单据,机器上可能已经存在dbactor + if err != nil { + dimension := monitor.NewDeveloperEventDimension(Scheduler, monitor.PartitionCron) + content := fmt.Sprintf("%v download dbactor fail: %s", ips, err.Error()) + monitor.SendEvent(dimension, content, Scheduler) + slog.Error("msg", "download dbactor fail. "+ + "dbmeta/apis/v1/flow/scene/download_dbactor/ error", err) + } + var files []Info + for _, ip := range ips { + files = append(files, Info{BkCloudId: int64(cloud), Ip: ip, + FileName: machineFileName[fmt.Sprintf("%s|%d", ip, cloud)]}) + } + // 下载分区文件 + err = DownloadFiles(files) + if err != nil { + msg := "download partition file error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + clusterFiles = append(clusterFiles, files...) 
+ } + time.Sleep(60 * time.Second) + // 创建执行分区单据 + err = CreatePartitionTicket(clusterFiles, clusterType, domain, vdate) + if err != nil { + msg := "create ticket error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + }(domain, cloud, machines) + } + wg.Wait() +} + +// SendMonitor 发送日志 +func SendMonitor(msg string, err error) { + dimension := monitor.NewDeveloperEventDimension(Scheduler, monitor.PartitionCron) + content := fmt.Sprintf("%s: %s", msg, err.Error()) + monitor.SendEvent(dimension, content, Scheduler) +} + +// ObjectToFile 分区结构体转换为文件 +func ObjectToFile(objects []PartitionObject, filename string) error { + b, err := json.Marshal(objects) + if err != nil { + msg := "json.Marshal error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return err + } + inputFile, err := os.OpenFile(filename, + os.O_CREATE|os.O_RDWR, 0644) + if err != nil { + msg := fmt.Sprintf("create file %s error", filename) + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return err + } + if _, err = inputFile.Write(b); err != nil { + _ = inputFile.Close() + _ = os.Remove(filename) + msg := fmt.Sprintf("write file %s error", filename) + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return err + } + return nil +} + +// CheckLogSendMonitor 检查分区日志,连续多少天失败或者没有执行,告知平台负责人 +func CheckLogSendMonitor(clusterType string, cronDate string) { + // 判断是mysql集群还是spider集群 + var tb, log string + switch clusterType { + case Tendbha, Tendbsingle: + tb = MysqlPartitionConfig + log = MysqlPartitionCronLogTable + case Tendbcluster: + tb = SpiderPartitionConfig + log = SpiderPartitionCronLogTable + default: + return + } + + // 查到所有的规则,去除近xxx天曾经成功执行过的分区规则,其余告警 + var all []*PartitionConfig + var faildays []PartitionConfig + continueDays := 7 + err := model.DB.Self.Table(tb).Where(fmt.Sprintf( + "phase in (?,?) and create_time> date_sub(now(), interval %d day)", continueDays), + online, offline).Scan(&all).Error + if err != nil { + msg := "send monitor error. get partition configs error" + SendMonitor(msg, err) + slog.Error("msg", msg, err) + return + } + type Ids struct { + ID int `json:"id" gorm:"column:id;primary_key;auto_increment"` + } + var succeeded []*Ids + vsql := fmt.Sprintf("select distinct(id) as id from %s where status like '%s' "+ + "and cron_date > date_sub(now(), interval %d day)", log, Success, continueDays) + err = model.DB.Self.Raw(vsql).Scan(&succeeded).Error + if err != nil { + msg := "send monitor error. 
get partition logs error"
+		SendMonitor(msg, err)
+		slog.Error("msg", msg, err)
+		return
+	}
+	var clusterTable = make(map[string]string)
+	var uniqCluster = make(map[string]struct{})
+
+	for _, item := range all {
+		failFlag := true
+		for _, ok := range succeeded {
+			if (*item).ID == (*ok).ID {
+				failFlag = false
+				break
+			}
+		}
+		if failFlag == true {
+			clusterTable[item.ImmuteDomain] = fmt.Sprintf("%s[%s.%s]", clusterTable[item.ImmuteDomain],
+				item.DbLike, item.TbLike)
+			if _, isExists := uniqCluster[item.ImmuteDomain]; isExists == false {
+				uniqCluster[item.ImmuteDomain] = struct{}{}
+			}
+			faildays = append(faildays, *item)
+		}
+	}
+	err = AddLogBatch(faildays, cronDate, Scheduler,
+		fmt.Sprintf("partition failed or not run for %d days", continueDays), Fail, clusterType)
+	if err != nil {
+		msg := "add log fail"
+		SendMonitor(msg, err)
+		slog.Error("msg", msg, err)
+	}
+	for domain := range uniqCluster {
+		content := fmt.Sprintf("partition failed for %d days: %s", continueDays, clusterTable[domain])
+		dimension := monitor.NewDeveloperEventDimension(Scheduler, domain)
+		slog.Info("monitor", "content", content, "dimension", dimension)
+		monitor.SendEvent(dimension, content, Scheduler)
+	}
+}
diff --git a/dbm-services/mysql/db-partition/service/cron_object.go b/dbm-services/mysql/db-partition/service/cron_object.go
new file mode 100644
index 0000000000..297d4f1c28
--- /dev/null
+++ b/dbm-services/mysql/db-partition/service/cron_object.go
@@ -0,0 +1,59 @@
+package service
+
+// Daily 每日例行执行的定时任务类型
+const Daily = "daily"
+
+// Retry 重试失败分区的定时任务类型
+const Retry = "retry"
+
+const Alarm = "alarm"
+
+const Scheduler = "127.0.0.1"
+
+// PartitionJob 分区定时任务参数
+type PartitionJob struct {
+	ClusterType string `json:"cluster_type"`
+	CronType    string `json:"cron_type"`
+	ZoneOffset  int    `json:"zone_offset"`
+	ZoneName    string `json:"zone_name"`
+	CronDate    string `json:"cron_date"`
+	Hour        string `json:"hour"`
+}
+
+// TendbhaRelation tendbha机器与分区配置关系
+type TendbhaRelation struct {
+	Machine        string           `json:"machine"`
+	ClusterConfigs []ClusterConfigs `json:"cluster_configs"`
+}
+
+// ClusterConfigs 集群信息与其分区配置
+type ClusterConfigs struct {
+	ClusterId int                `json:"cluster_id"`
+	Master    Host               `json:"master"`
+	Configs   []*PartitionConfig `json:"configs"`
+}
+
+// TendbClusterRelation tendbcluster集群信息与分区规则
+type TendbClusterRelation struct {
+	Cluster  []int      `json:"cluster"`
+	Machines []string   `json:"machines"`
+	Rules    []*Checker `json:"rules"`
+}
+
+// TendbhaRelationBiz 业务、机器与分区配置所属关系
+type TendbhaRelationBiz struct {
+	BkBizId   int64             `json:"bk_biz_id"`
+	Relations []TendbhaRelation `json:"relations"`
+}
+
+// SpiderNode tendbcluster节点信息
+type SpiderNode struct {
+	Ip    string `json:"ip"`
+	Port  int    `json:"port"`
+	Cloud int    `json:"cloud"`
+	// 分片编号
+	SplitNum string `json:"split_num"`
+	// spider实例类型 mysql or TDBCTL
+	Wrapper    string `json:"wrapper"`
+	ServerName string `json:"server_name"`
+}
diff --git a/dbm-services/mysql/db-partition/service/db_meta_service.go b/dbm-services/mysql/db-partition/service/db_meta_service.go
index 0909243a63..cc513963a9 100644
--- a/dbm-services/mysql/db-partition/service/db_meta_service.go
+++ b/dbm-services/mysql/db-partition/service/db_meta_service.go
@@ -30,11 +30,9 @@ const Tendbsingle string = "tendbsingle"
 // Tendbcluster TODO
 const Tendbcluster string = "tendbcluster"
 
-// CheckFailed TODO
-const CheckFailed string = "FAILED"
+const Fail string = "failed"
 
-// CheckSucceeded TODO
-const CheckSucceeded string = "SUCCEEDED"
+const Success string = "succeeded"
 
 // ExecuteAsynchronous TODO
 const 
ExecuteAsynchronous string = "UNKNOWN" @@ -156,11 +154,18 @@ type DownloadPara struct { CreatedBy string `json:"created_by"` } +type DownloadPartitionPara struct { + TicketType string `json:"ticket_type"` + BkBizId int64 `json:"bk_biz_id"` + Files []Info `json:"files"` + CreatedBy string `json:"created_by"` + Path string `json:"path"` +} + // DownloadDbactor 下载dbactor func DownloadDbactor(bkCloudId int64, ips []string) error { c := util.NewClientByHosts(viper.GetString("db_meta_service")) url := "/apis/v1/flow/scene/download_dbactor" - _, err := c.Do(http.MethodPost, url, DownloadPara{TicketType: "download_dbactor", BkBizId: viper.GetInt64("dba.bk_biz_id"), BkCloudId: bkCloudId, DbType: "mysql", Ips: ips, CreatedBy: "admin"}) @@ -171,6 +176,20 @@ func DownloadDbactor(bkCloudId int64, ips []string) error { return nil } +func DownloadFiles(files []Info) error { + path := "mysql/partition" + c := util.NewClientByHosts(viper.GetString("db_meta_service")) + url := "/apis/v1/flow/scene/download_file" + _, err := c.Do(http.MethodPost, url, DownloadPartitionPara{TicketType: "download_file", + BkBizId: viper.GetInt64("dba.bk_biz_id"), + Files: files, CreatedBy: "admin", Path: path}) + if err != nil { + slog.Error("msg", url, err) + return errno.DownloadFileFail.Add(err.Error()) + } + return nil +} + // GetCluster 根据域名获取集群信息 func GetCluster(dns Domain, ClusterType string) (Instance, error) { c := util.NewClientByHosts(viper.GetString("db_meta_service")) diff --git a/dbm-services/mysql/db-partition/service/execute_partition_object.go b/dbm-services/mysql/db-partition/service/execute_partition_object.go index 385af434fc..5c412a91a1 100644 --- a/dbm-services/mysql/db-partition/service/execute_partition_object.go +++ b/dbm-services/mysql/db-partition/service/execute_partition_object.go @@ -42,11 +42,8 @@ type PartitionConfigWithLog struct { PartitionConfig // 这里故意设置为string而不是time.Time,因为当值为null会被转换为1-01-01 08:00:00 ExecuteTime string `json:"execute_time" gorm:"execute_time"` - TicketId int `json:"ticket_id" gorm:"ticket_id"` // 分区任务的状态 Status string `json:"status" gorm:"status"` - // 分区单据的状态 - TicketStatus string `json:"ticket_status" gorm:"ticket_status"` // 分区检查的结果 CheckInfo string `json:"check_info" gorm:"check_info"` } @@ -64,12 +61,12 @@ type ConfigDetail struct { // Ticket 分区单据 type Ticket struct { BkBizId int `json:"bk_biz_id"` - DbAppAbbr string `json:"db_app_abbr"` - BkBizName string `json:"bk_biz_name"` TicketType string `json:"ticket_type"` Remark string `json:"remark"` IgnoreDuplication bool `json:"ignore_duplication"` Details Detail `json:"details"` + ImmuteDomain string `json:"immute_domain"` + CronDate string `json:"cron_date"` } // Details 单据参数 @@ -78,6 +75,18 @@ type Details struct { Clusters ClustersResponse `json:"clusters"` } +// Detail 用于创建单据 +type Detail struct { + Infos []Info `json:"infos"` +} + +// Info 用于创建单据 +type Info struct { + BkCloudId int64 `json:"bk_cloud_id"` + Ip string `json:"ip"` + FileName string `json:"file_name"` +} + // ClustersResponse 用于创建单据 type ClustersResponse struct { ClusterResponse map[string]ClusterResponse `json:"cluster_response"` @@ -103,20 +112,6 @@ type ClusterResponse struct { ClusterTypeName string `json:"cluster_type_name"` } -// Detail 用于创建单据 -type Detail struct { - Infos []Info `json:"infos"` -} - -// Info 用于创建单据 -type Info struct { - ConfigId int `json:"config_id"` - ClusterId int `json:"cluster_id"` - ImmuteDomain string `json:"immute_domain"` - BkCloudId int `json:"bk_cloud_id"` - PartitionObjects []PartitionObject `json:"partition_objects"` 
-} - // PartitionObject 待执行的分区语句集合 type PartitionObject struct { Ip string `json:"ip"` diff --git a/dbm-services/mysql/db-partition/service/manage_config.go b/dbm-services/mysql/db-partition/service/manage_config.go index 1311196ed9..d7af2891ca 100644 --- a/dbm-services/mysql/db-partition/service/manage_config.go +++ b/dbm-services/mysql/db-partition/service/manage_config.go @@ -96,7 +96,7 @@ func (m *QueryParititionsInput) GetPartitionsConfig() ([]*PartitionConfigWithLog 三、ticket_id是NULL,status是NULL,分区规则还没有执行过 */ vsql = fmt.Sprintf("SELECT config.*, logs.create_time as execute_time, "+ - "logs.ticket_id as ticket_id, logs.check_info as check_info, "+ + "logs.check_info as check_info, "+ "logs.status as status FROM "+ "(select * from %s where %s ) AS config LEFT JOIN "+ "(SELECT log.* FROM %s AS log, "+ @@ -143,7 +143,7 @@ func (m *QueryLogInput) GetPartitionLog() ([]*PartitionLog, int64, error) { return nil, 0, err } - vsql = fmt.Sprintf("select id, ticket_id, create_time as execute_time, "+ + vsql = fmt.Sprintf("select id, create_time as execute_time, "+ "check_info, status from %s where %s order by execute_time desc %s", logTb, where, limitCondition) err = model.DB.Self.Raw(vsql).Scan(&allResults).Error @@ -307,7 +307,7 @@ func (m *CreatePartitionsInput) CreatePartitionsConfig() (error, []int) { if err != nil { return err, []int{} } - warnings2, err := m.compareWithExistDB(tbName) + warnings2, err := m.CompareWithExistDB(tbName) if err != nil { return err, []int{} } @@ -633,8 +633,8 @@ func (m *CreatePartitionsInput) compareWithSameArray() (warnings []string, err e return warnings, nil } -// compareWithExistDB 检查重复库表 -func (m *CreatePartitionsInput) compareWithExistDB(tbName string) (warnings []string, err error) { +// CompareWithExistDB 检查重复库表 +func (m *CreatePartitionsInput) CompareWithExistDB(tbName string) (warnings []string, err error) { l := len(m.DbLikes) for i := 0; i < l; i++ { db := m.DbLikes[i] diff --git a/dbm-services/mysql/db-priv/assests/migrate.go b/dbm-services/mysql/db-priv/assests/migrate.go index 66cbaed50e..5b475c65b3 100644 --- a/dbm-services/mysql/db-priv/assests/migrate.go +++ b/dbm-services/mysql/db-priv/assests/migrate.go @@ -116,7 +116,7 @@ func DoMigratePlatformPassword() error { // 平台密码初始化,不存在新增 var users []ComponentPlatformUser users = append(users, ComponentPlatformUser{Component: "mysql", Usernames: []string{ - "dba_bak_all_sel", "MONITOR", "MONITOR_ALL", "mysql", "repl", "yw"}}) + "dba_bak_all_sel", "MONITOR", "MONITOR_ALL", "mysql", "repl", "yw", "partition_yw"}}) users = append(users, ComponentPlatformUser{Component: "proxy", Usernames: []string{"proxy"}}) users = append(users, ComponentPlatformUser{Component: "tbinlogdumper", Usernames: []string{"ADMIN"}}) diff --git a/dbm-services/mysql/db-priv/handler/admin_password.go b/dbm-services/mysql/db-priv/handler/admin_password.go index 07aa76880c..ae81fc8e95 100644 --- a/dbm-services/mysql/db-priv/handler/admin_password.go +++ b/dbm-services/mysql/db-priv/handler/admin_password.go @@ -109,12 +109,10 @@ func (m *PrivService) GetMysqlAdminPassword(c *gin.Context) { return } -// ModifyMysqlAdminPassword 新增或者修改mysql实例中管理用户的密码,可用于随机化密码 +// ModifyAdminPassword 新增或者修改mysql实例中管理用户的密码,可用于随机化密码 func (m *PrivService) ModifyAdminPassword(c *gin.Context) { slog.Info("do ModifyAdminPassword!") var input service.ModifyAdminUserPasswordPara - ticket := strings.TrimPrefix(c.FullPath(), "/priv/") - body, err := ioutil.ReadAll(c.Request.Body) if err != nil { slog.Error("msg", err) @@ -132,7 +130,7 @@ func (m *PrivService) 
ModifyAdminPassword(c *gin.Context) { SendResponse(c, nil, nil) } // 前端页面调用等同步返回,返回修改成功的实例以及没有修改成功的实例 - batch, err := input.ModifyAdminPassword(string(body), ticket) + batch, err := input.ModifyAdminPassword() if input.Async == false { SendResponse(c, err, batch) } diff --git a/dbm-services/mysql/db-priv/service/admin_password.go b/dbm-services/mysql/db-priv/service/admin_password.go index d73034496e..6bc8ad64ea 100644 --- a/dbm-services/mysql/db-priv/service/admin_password.go +++ b/dbm-services/mysql/db-priv/service/admin_password.go @@ -268,7 +268,7 @@ func (m *GetAdminUserPasswordPara) GetMysqlAdminPassword() ([]*TbPasswords, int, } // ModifyAdminPassword 修改mysql实例中用户的密码,可用于随机化密码 -func (m *ModifyAdminUserPasswordPara) ModifyAdminPassword(jsonPara string, ticket string) (BatchResult, error) { +func (m *ModifyAdminUserPasswordPara) ModifyAdminPassword() (BatchResult, error) { var errMsg Err var success Resource var fail Resource diff --git a/dbm-services/mysql/db-simulation/go.mod b/dbm-services/mysql/db-simulation/go.mod index a5d333c71b..09c826e2e6 100644 --- a/dbm-services/mysql/db-simulation/go.mod +++ b/dbm-services/mysql/db-simulation/go.mod @@ -8,6 +8,7 @@ require ( github.com/gin-contrib/requestid v0.0.6 github.com/gin-gonic/gin v1.9.1 github.com/pkg/errors v0.9.1 + github.com/samber/lo v1.39.0 github.com/spf13/viper v1.16.0 go.opentelemetry.io/contrib/instrumentation/github.com/gin-gonic/gin/otelgin v0.46.1 gopkg.in/yaml.v2 v2.4.0 @@ -71,6 +72,7 @@ require ( go.opentelemetry.io/otel/trace v1.21.0 // indirect golang.org/x/arch v0.5.0 // indirect golang.org/x/crypto v0.15.0 // indirect + golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 // indirect golang.org/x/net v0.18.0 // indirect golang.org/x/oauth2 v0.12.0 // indirect golang.org/x/sys v0.14.0 // indirect diff --git a/dbm-services/mysql/db-simulation/go.sum b/dbm-services/mysql/db-simulation/go.sum index 633692e4cc..1696444abd 100644 --- a/dbm-services/mysql/db-simulation/go.sum +++ b/dbm-services/mysql/db-simulation/go.sum @@ -269,6 +269,8 @@ github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTE github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/samber/lo v1.39.0 h1:4gTz1wUhNYLhFSKl6O+8peW0v2F4BCY034GRpU9WnuA= +github.com/samber/lo v1.39.0/go.mod h1:+m/ZKRl6ClXCE2Lgf3MsQlWfh4bn1bz6CXEOxnEXnEA= github.com/spf13/afero v1.9.5 h1:stMpOSZFs//0Lv29HduCmli3GUfpFoF3Y1Q/aXj/wVM= github.com/spf13/afero v1.9.5/go.mod h1:UBogFpq8E9Hx+xc5CNTTEpTnuHVmXDwZcZcE1eb/UhQ= github.com/spf13/cast v1.5.1 h1:R+kOtfhWQE6TVQzY+4D7wJLBgkdVasCEFxSUBYBYIlA= @@ -347,6 +349,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17 h1:3MTrJm4PyNL9NBqvYDSj3DHl46qQakyfqfWo4jgfaEM= +golang.org/x/exp v0.0.0-20220303212507-bbda1eaf7a17/go.mod h1:lgLbSvA5ygNOMpwM/9anMpWVlVJ7Z+cHWq/eFuinpGE= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= 
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= diff --git a/dbm-services/mysql/db-tools/dbactuator/go.mod b/dbm-services/mysql/db-tools/dbactuator/go.mod index d5038e7084..f54e6344d7 100644 --- a/dbm-services/mysql/db-tools/dbactuator/go.mod +++ b/dbm-services/mysql/db-tools/dbactuator/go.mod @@ -1,6 +1,8 @@ module dbm-services/mysql/db-tools/dbactuator -go 1.21 +go 1.21.0 + +toolchain go1.21.11 require ( ariga.io/atlas v0.14.0 diff --git a/dbm-ui/backend/db_meta/api/priv_manager/biz_clusters.py b/dbm-ui/backend/db_meta/api/priv_manager/biz_clusters.py index 0caa847439..958b2296a9 100644 --- a/dbm-ui/backend/db_meta/api/priv_manager/biz_clusters.py +++ b/dbm-ui/backend/db_meta/api/priv_manager/biz_clusters.py @@ -31,6 +31,7 @@ def biz_clusters(bk_biz_id: int, immute_domains: Optional[List[str]]): for cluster in qs: res.append( { + "id": cluster.id, "immute_domain": cluster.immute_domain, "cluster_type": cluster.cluster_type, "bk_biz_id": bk_biz_id, diff --git a/dbm-ui/backend/db_services/partition/handlers.py b/dbm-ui/backend/db_services/partition/handlers.py index 71eb96f72a..ef11f80796 100644 --- a/dbm-ui/backend/db_services/partition/handlers.py +++ b/dbm-ui/backend/db_services/partition/handlers.py @@ -7,7 +7,6 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ -import json from collections import defaultdict from typing import Any, Dict, List, Union @@ -20,7 +19,11 @@ from backend.db_meta.enums import ClusterType from backend.db_meta.models import Cluster from backend.db_services.partition.constants import QUERY_DATABASE_FIELD_TYPE, QUERY_UNIQUE_FIELDS_SQL -from backend.db_services.partition.exceptions import DBPartitionCreateException, DBPartitionInvalidFieldException +from backend.db_services.partition.exceptions import ( + DBPartitionCreateException, + DBPartitionInternalServerError, + DBPartitionInvalidFieldException, +) from backend.exceptions import ApiRequestError, ApiResultError from backend.ticket.constants import TicketType from backend.ticket.models import Ticket @@ -143,19 +146,6 @@ def execute_partition(cls, user: str, cluster_id: int, partition_objects: Dict[s auto_execute=True, ) ticket_list.append(model_to_dict(ticket)) - # 创建分区日志 - partition_log_data = { - "cluster_type": cluster.cluster_type, - "config_id": int(partition_data["config_id"]), - "bk_biz_id": cluster.bk_biz_id, - "cluster_id": cluster.id, - "bk_cloud_id": cluster.bk_cloud_id, - "ticket_id": ticket.id, - "immute_domain": cluster.immute_domain, - "time_zone": cluster.time_zone, - "ticket_detail": json.dumps(ticket.details), - } - DBPartitionApi.create_log(partition_log_data) return ticket_list @@ -205,6 +195,9 @@ def _verify_valid_index(_index_keys, _field): rpc_results = DRSApi.rpc( {"bk_cloud_id": cluster.bk_cloud_id, "addresses": [address], "cmds": [unique_fields_sql, fields_type_sql]} ) + if rpc_results[0]["cmd_results"] is None: + raise DBPartitionInternalServerError(_("字段信息查询错误:{}").format(rpc_results[0]["error_msg"])) + cmd__data = {res["cmd"]: res["table_data"] for res in rpc_results[0]["cmd_results"]} index_data, field_type_data = cmd__data[unique_fields_sql], cmd__data[fields_type_sql] diff --git a/dbm-ui/backend/db_services/partition/serializers.py 
b/dbm-ui/backend/db_services/partition/serializers.py index b16c7411a5..07b36180d7 100644 --- a/dbm-ui/backend/db_services/partition/serializers.py +++ b/dbm-ui/backend/db_services/partition/serializers.py @@ -14,10 +14,10 @@ from backend.db_meta.enums import ClusterType from backend.db_meta.models import AppCache, Cluster -from backend.ticket.builders.mysql.mysql_partition import PartitionObjectSerializer from ...ticket.builders.common.field import DBTimezoneField from ...ticket.builders.mysql.base import DBTableField +from ...ticket.builders.mysql.mysql_partition import PartitionObjectSerializer from . import mock diff --git a/dbm-ui/backend/db_services/partition/views.py b/dbm-ui/backend/db_services/partition/views.py index 8db5eed16e..d168e0fe9a 100644 --- a/dbm-ui/backend/db_services/partition/views.py +++ b/dbm-ui/backend/db_services/partition/views.py @@ -42,7 +42,6 @@ from ...iam_app.handlers.drf_perm.cluster import PartitionManagePermission from ...iam_app.handlers.permission import Permission from ...ticket.constants import TicketStatus -from ...ticket.models import Ticket from .constants import SWAGGER_TAG from .handlers import PartitionHandler @@ -57,12 +56,9 @@ class DBPartitionViewSet(viewsets.AuditedModelViewSet): @staticmethod def _update_log_status(log_list): # 更新分区日志的状态 - ticket_ids = [info["ticket_id"] for info in log_list if info["ticket_id"]] - ticket_id__ticket_map = {ticket.id: ticket for ticket in Ticket.objects.filter(id__in=ticket_ids)} for info in log_list: - ticket = ticket_id__ticket_map.get(info["ticket_id"], None) - info["status"] = ticket.status if ticket else (info["status"] or TicketStatus.PENDING) - + info["status"] = info["status"].upper() + info["status"] = info["status"] if info["status"] in TicketStatus.get_values() else TicketStatus.PENDING return log_list @common_swagger_auto_schema( diff --git a/dbm-ui/backend/flow/consts.py b/dbm-ui/backend/flow/consts.py index cd604eef72..3b60653988 100644 --- a/dbm-ui/backend/flow/consts.py +++ b/dbm-ui/backend/flow/consts.py @@ -1070,6 +1070,7 @@ class UserName(str, StructuredEnum): PROXY = EnumField("proxy", _("PROXY实例账号")) REDIS_DEFAULT = EnumField("default", _("REDIS默认账号")) HDFS_DEFAULT = EnumField("root", _("HDFS默认账号")) + PARTITION_YW = EnumField("partition_yw", _("分区实例账号")) class MySQLPrivComponent(str, StructuredEnum): diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/common/download_file.py b/dbm-ui/backend/flow/engine/bamboo/scene/common/download_file.py new file mode 100644 index 0000000000..621e834f71 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/common/download_file.py @@ -0,0 +1,59 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +import logging +import os +from dataclasses import asdict +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.configuration.constants import DBType +from backend.core.consts import BK_PKG_INSTALL_PATH +from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs + +logger = logging.getLogger("flow") + + +class DownloadFileFlow(object): + def __init__(self, root_id: str, data: Optional[Dict]): + self.root_id = root_id + self.data = data + + def download_file_flow(self): + """ + 下载指定文件到机器上 + """ + pipeline = Builder(root_id=self.root_id, data=self.data) + sub_pipelines = [] + print(self.data["files"]) + for file in self.data["files"]: + sub_pipeline = SubBuilder(root_id=self.root_id, data=self.data) + sub_pipeline.add_act( + act_name=_("下发文件"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=file["bk_cloud_id"], + exec_ip=file["ip"], + file_target_path=os.path.join(BK_PKG_INSTALL_PATH, "partition"), + file_list=GetFileList(db_type=DBType.MySQL).mysql_import_sqlfile( + path=self.data["path"], filelist=[file["file_name"]] + ), + ) + ), + ) + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("ip[{}]下载文件").format(file["ip"]))) + pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + logger.info(_("构建下载文件流程成功")) + pipeline.run_pipeline() diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition.py index 1115ff4e88..7548230c9c 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition.py @@ -2,6 +2,7 @@ import json import logging import os +import time from dataclasses import asdict from typing import Dict, Optional @@ -9,13 +10,11 @@ from backend.configuration.constants import DBType from backend.core.consts import BK_PKG_INSTALL_PATH -from backend.db_meta.enums import InstanceRole -from backend.db_meta.exceptions import ClusterNotExistException, MasterInstanceNotExistException -from backend.db_meta.models import Cluster, StorageInstance from backend.flow.consts import DBA_ROOT_USER, LONG_JOB_TIMEOUT from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.mysql_partition_report import MysqlPartitionReportComponent from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.plugins.components.collections.mysql.upload_file import UploadFileServiceComponent from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs, UploadFile @@ -74,25 +73,21 @@ def __init__(self, root_id: str, data: Optional[Dict]): def mysql_partition_flow(self): """ - 增加单据临时ADMIN账号的添加和删除逻辑 每个分区配置一个子流程: (1)检查表结构 (2)获取分区变更的sql (3)dbactor执行分区指令 """ - cluster_ids = [i["cluster_id"] for i in self.data["infos"]] - mysql_partition_pipeline = Builder( - root_id=self.root_id, data=self.data, 
need_random_pass_cluster_ids=list(set(cluster_ids)) - ) + mysql_partition_pipeline = Builder(root_id=self.root_id, data=self.data) sub_pipelines = [] + cron_date = {"cron_date": time.strftime("%Y%m%d", time.localtime())} for info in self.data["infos"]: sub_data = copy.deepcopy(self.data) sub_data.pop("infos") - sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info}) + sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info, **cron_date}) bk_cloud_id = info["bk_cloud_id"] - ip, port = _get_master_instance(info["cluster_id"], self.data["bk_biz_id"]) - partition_object = info["partition_objects"][0] - filename = "partition_sql_file_{}_{}_{}.txt".format(ip, port, self.data["uid"]) + ip, port = info["partition_objects"][0]["ip"], info["partition_objects"][0]["port"] + filename = "partition_sql_file_{}_{}_{}.json".format(ip, port, self.data["uid"]) sub_pipeline.add_act( act_name=_("上传sql文件"), @@ -100,7 +95,7 @@ def mysql_partition_flow(self): kwargs=asdict( UploadFile( path=os.path.join(BKREPO_PARTITION_PATH, filename), - content=json.dumps(partition_object["execute_objects"]), + content=json.dumps(info["partition_objects"]), ) ), ) @@ -119,9 +114,19 @@ def mysql_partition_flow(self): ) ), ) + sub_pipeline.add_act( + act_name=_("下发actuator介质"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=bk_cloud_id, + exec_ip=ip, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) - cluster = {"ip": ip, "port": port, "file_path": os.path.join(BK_PKG_INSTALL_PATH, "partition", filename)} - + cluster = {"ip": ip, "file_path": filename} sub_pipeline.add_act( act_name=_("actuator执行partition"), act_component_code=ExecuteDBActuatorScriptComponent.code, @@ -135,28 +140,18 @@ def mysql_partition_flow(self): cluster=cluster, ) ), + write_payload_var="partition_report", + ) + + sub_pipeline.add_act( + act_name=_("生成分区执行报告"), + act_component_code=MysqlPartitionReportComponent.code, + kwargs={}, ) + sub_pipelines.append( sub_pipeline.build_sub_process(sub_name=_("cluster[{}]的分区任务").format(info["immute_domain"])) ) mysql_partition_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) logger.info(_("构建mysql partition流程成功")) - mysql_partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext(), is_drop_random_user=True) - - -def _get_master_instance(cluster_id, bk_biz_id): - try: - cluster = Cluster.objects.get(id=cluster_id, bk_biz_id=bk_biz_id) - except Cluster.DoesNotExist: - logger.error("cluster 「 bk_biz_id = {}, cluster_id = {} 」 not exists".format(bk_biz_id, cluster_id)) - raise ClusterNotExistException(cluster_id=cluster_id) - try: - master = cluster.storageinstance_set.filter( - instance_role__in=[InstanceRole.BACKEND_MASTER, InstanceRole.ORPHAN], - ).first() - except StorageInstance.DoesNotExist: - logger.error( - "master or orphan instance「 bk_biz_id = {}, cluster_id = {} 」 not exists".format(bk_biz_id, cluster_id) - ) - raise MasterInstanceNotExistException(cluster_type=cluster.cluster_type, cluster_id=cluster_id) - return master.machine.ip, master.port + mysql_partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext()) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition_cron.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition_cron.py new file mode 100644 index 0000000000..ff03848e64 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_partition_cron.py @@ -0,0 +1,78 @@ +import copy +import 
logging +from dataclasses import asdict +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.flow.consts import DBA_ROOT_USER, LONG_JOB_TIMEOUT +from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.mysql_partition_report import MysqlPartitionReportComponent +from backend.flow.utils.mysql.mysql_act_dataclass import ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload +from backend.flow.utils.mysql.mysql_context_dataclass import MysqlPartitionContext + +logger = logging.getLogger("flow") + + +class MysqlPartitionCronFlow(object): + """ + 分区定时任务单据的流程引擎 + { + "uid": "xxx", + "root_id": 123, + "created_by": "xxx", + "bk_biz_id": "xxx", + "ticket_type": "MYSQL_PARTITION_CRON", + "infos": [ + { + "bk_cloud_id": 0, + "ip": "1.1.1.1", + "file_name": "xxx" + } + ] + } + """ + + def __init__(self, root_id: str, data: Optional[Dict]): + self.root_id = root_id + self.data = data + + def mysql_partition_cron_flow(self): + """ + 每个分区配置一个子流程: + (1)检查表结构 + (2)获取分区变更的sql + (3)dbactor执行分区指令 + """ + mysql_partition_pipeline = Builder(root_id=self.root_id, data=self.data) + sub_pipelines = [] + for info in self.data["infos"]: + sub_data = copy.deepcopy(self.data) + sub_data.pop("infos") + sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info}) + sub_pipeline.add_act( + act_name=_("actuator执行partition"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + job_timeout=LONG_JOB_TIMEOUT, + exec_ip=info["ip"], + bk_cloud_id=info["bk_cloud_id"], + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.get_partition_cron_payload.__name__, + ) + ), + write_payload_var="partition_report", + ) + + sub_pipeline.add_act( + act_name=_("生成分区执行报告"), + act_component_code=MysqlPartitionReportComponent.code, + kwargs={}, + ) + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("ip[{}]的分区任务").format(info["ip"]))) + mysql_partition_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + logger.info(_("构建mysql partition 定时任务流程成功")) + mysql_partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext(), is_drop_random_user=True) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition.py index 8641b06e55..8dac06b638 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition.py @@ -2,6 +2,8 @@ import json import logging import os +import time +from collections import defaultdict from dataclasses import asdict from typing import Dict, Optional @@ -13,9 +15,13 @@ from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.mysql_partition_report import MysqlPartitionReportComponent from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.plugins.components.collections.mysql.upload_file import UploadFileServiceComponent -from 
backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs, UploadFile +from backend.flow.plugins.components.collections.spider.spider_partition_callback import ( + SpiderPartitionCallbackComponent, +) +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs, IpKwargs, UploadFile from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload from backend.flow.utils.mysql.mysql_context_dataclass import MysqlPartitionContext @@ -75,42 +81,40 @@ def spider_partition_flow(self): (1)检查表结构 (2)获取分区变更的sql (3)dbactor执行分区指令 - 增加单据临时ADMIN账号的添加和删除逻辑 """ - cluster_ids = [i["cluster_id"] for i in self.data["infos"]] - partition_pipeline = Builder( - root_id=self.root_id, data=self.data, need_random_pass_cluster_ids=list(set(cluster_ids)) - ) + partition_pipeline = Builder(root_id=self.root_id, data=self.data) sub_pipelines = [] + cron_date = {"cron_date": time.strftime("%Y%m%d", time.localtime())} + sub_data = copy.deepcopy(self.data) + sub_data.pop("infos") for info in self.data["infos"]: - sub_data = copy.deepcopy(self.data) - sub_data.pop("infos") - sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info}) + sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info, **cron_date}) bk_cloud_id = info["bk_cloud_id"] upload_sql_file_list = [] download_sql_file_list = [] - actuator_exec_list = [] + download_actuator_list = [] + sub_sub_pipelines = [] + ip_sqls = defaultdict(list) for partition_object in info["partition_objects"]: ip = partition_object["ip"] - port = partition_object["port"] - shard = partition_object["shard_name"] - address_tip = "{} {}:{}".format(shard, ip, port) - filename = _("partition_sql_file_{}_{}_{}_{}.txt".format(ip, port, shard, self.data["uid"])) - + ip_sqls[ip].append(partition_object) + for ip in ip_sqls: + sqls = ip_sqls[ip] + filename = _("partition_sql_file_{}_{}.json".format(ip, self.data["uid"])) upload_sql_file = dict() - upload_sql_file["act_name"] = _("{}: {}".format("上传sql文件", address_tip)) + upload_sql_file["act_name"] = _("{}: {}".format("上传sql文件", ip)) upload_sql_file["act_component_code"] = UploadFileServiceComponent.code upload_sql_file["kwargs"] = asdict( UploadFile( path=os.path.join(BKREPO_PARTITION_PATH, filename), - content=json.dumps(partition_object["execute_objects"]), + content=json.dumps(sqls), ) ) upload_sql_file_list.append(upload_sql_file) sql_file_info = dict() - sql_file_info["act_name"] = _("{}: {}".format(_("下发sql文件"), address_tip)) + sql_file_info["act_name"] = _("{}: {}".format(_("下发sql文件"), ip)) sql_file_info["act_component_code"] = TransFileComponent.code sql_file_info["kwargs"] = asdict( DownloadMediaKwargs( @@ -124,35 +128,62 @@ def spider_partition_flow(self): ) download_sql_file_list.append(sql_file_info) + download_actuator_info = dict() + download_actuator_info["act_name"] = _("{}: {}".format(_("下发dbactor文件"), ip)) + download_actuator_info["act_component_code"] = TransFileComponent.code + download_actuator_info["kwargs"] = asdict( + DownloadMediaKwargs( + bk_cloud_id=bk_cloud_id, + exec_ip=ip, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ) + download_actuator_list.append(download_actuator_info) + cluster = { "ip": ip, - "port": port, - "shard_name": shard, - "file_path": os.path.join(BK_PKG_INSTALL_PATH, "partition", filename), + "file_path": filename, } - exec_info = dict() - exec_info["act_name"] = _("{}: {}".format(_("actuator执行partition"), address_tip)) - 
exec_info["act_component_code"] = ExecuteDBActuatorScriptComponent.code - exec_info["kwargs"] = asdict( - ExecActuatorKwargs( - exec_ip=ip, - bk_cloud_id=bk_cloud_id, - run_as_system_user=DBA_ROOT_USER, - get_mysql_payload_func=MysqlActPayload.get_partition_payload.__name__, - cluster=cluster, - job_timeout=LONG_JOB_TIMEOUT, - ) + sub_sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info, **cron_date}) + sub_sub_pipeline.add_act( + act_name=_("{}: {}".format(_("actuator执行partition"), ip)), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + job_timeout=LONG_JOB_TIMEOUT, + exec_ip=ip, + bk_cloud_id=bk_cloud_id, + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.get_partition_payload.__name__, + cluster=cluster, + ) + ), + write_payload_var="partition_report", + ) + sub_sub_pipeline.add_act( + act_name=_("生成分区执行报告"), + act_component_code=MysqlPartitionReportComponent.code, + kwargs=asdict(IpKwargs(ip=ip)), + ) + sub_sub_pipelines.append( + sub_sub_pipeline.build_sub_process(sub_name=_("{}: {}").format(_("actuator执行partition"), ip)) ) - actuator_exec_list.append(exec_info) sub_pipeline.add_parallel_acts(acts_list=upload_sql_file_list) sub_pipeline.add_parallel_acts(acts_list=download_sql_file_list) - sub_pipeline.add_parallel_acts(acts_list=actuator_exec_list) - + sub_pipeline.add_parallel_acts(acts_list=download_actuator_list) + sub_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_sub_pipelines) sub_pipelines.append( sub_pipeline.build_sub_process(sub_name=_("cluster[{}]的分区任务").format(info["immute_domain"])) ) partition_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + sub_pipeline_callback = SubBuilder(root_id=self.root_id, data={**sub_data, **cron_date}) + sub_pipeline_callback.add_act( + act_name=_("录入分区日志"), + act_component_code=SpiderPartitionCallbackComponent.code, + kwargs={}, + ) + partition_pipeline.add_sub_pipeline(sub_pipeline_callback.build_sub_process(sub_name=_("录入分区日志"))) logger.info(_("构建spider partition流程成功")) - partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext(), is_drop_random_user=True) + partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext()) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition_cron.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition_cron.py new file mode 100644 index 0000000000..fce1f41539 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_partition_cron.py @@ -0,0 +1,88 @@ +import copy +import logging +from dataclasses import asdict +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.flow.consts import DBA_ROOT_USER, LONG_JOB_TIMEOUT +from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.mysql_partition_report import MysqlPartitionReportComponent +from backend.flow.plugins.components.collections.spider.spider_partition_callback import ( + SpiderPartitionCallbackComponent, +) +from backend.flow.utils.mysql.mysql_act_dataclass import ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload +from backend.flow.utils.mysql.mysql_context_dataclass import MysqlPartitionContext + +logger = logging.getLogger("flow") + + +class 
SpiderPartitionCronFlow(object): + """ + 分区定时任务单据的流程引擎 + { + "uid": "xxx", + "root_id": 123, + "created_by": "xxx", + "bk_biz_id": "xxx", + "ticket_type": "TENDBCLUSTER_PARTITION_CRON", + "infos": [ + { + "bk_cloud_id": 0, + "ip": "1.1.1.1", + "file_name": "xxx" + } + ] + } + """ + + def __init__(self, root_id: str, data: Optional[Dict]): + self.root_id = root_id + self.data = data + + def spider_partition_cron_flow(self): + """ + 每个分区配置一个子流程: + (1)检查表结构 + (2)获取分区变更的sql + (3)dbactor执行分区指令 + """ + spider_partition_pipeline = Builder(root_id=self.root_id, data=self.data) + sub_pipelines = [] + sub_data = copy.deepcopy(self.data) + sub_data.pop("infos") + for info in self.data["infos"]: + sub_pipeline = SubBuilder(root_id=self.root_id, data={**sub_data, **info}) + sub_pipeline.add_act( + act_name=_("actuator执行partition"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + job_timeout=LONG_JOB_TIMEOUT, + exec_ip=info["ip"], + bk_cloud_id=info["bk_cloud_id"], + run_as_system_user=DBA_ROOT_USER, + get_mysql_payload_func=MysqlActPayload.get_partition_cron_payload.__name__, + ) + ), + write_payload_var="partition_report", + ) + + sub_pipeline.add_act( + act_name=_("生成分区执行报告"), + act_component_code=MysqlPartitionReportComponent.code, + kwargs={}, + ) + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("ip[{}]的分区任务").format(info["ip"]))) + spider_partition_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + sub_pipeline_callback = SubBuilder(root_id=self.root_id, data={**sub_data}) + sub_pipeline_callback.add_act( + act_name=_("录入分区日志"), + act_component_code=SpiderPartitionCallbackComponent.code, + kwargs={}, + ) + spider_partition_pipeline.add_sub_pipeline(sub_pipeline_callback.build_sub_process(sub_name=_("录入分区日志"))) + logger.info(_("构建spider partition 定时任务流程成功")) + spider_partition_pipeline.run_pipeline(init_trans_data_class=MysqlPartitionContext(), is_drop_random_user=True) diff --git a/dbm-ui/backend/flow/engine/controller/mysql.py b/dbm-ui/backend/flow/engine/controller/mysql.py index b174489f9e..711bc59612 100644 --- a/dbm-ui/backend/flow/engine/controller/mysql.py +++ b/dbm-ui/backend/flow/engine/controller/mysql.py @@ -12,6 +12,7 @@ from backend.db_meta.enums import ClusterType from backend.flow.engine.bamboo.scene.common.download_dbactor import DownloadDbactorFlow from backend.flow.engine.bamboo.scene.mysql.dbconsole import DbConsoleDumpSqlFlow +from backend.flow.engine.bamboo.scene.common.download_file import DownloadFileFlow from backend.flow.engine.bamboo.scene.mysql.import_sqlfile_flow import ImportSQLFlow from backend.flow.engine.bamboo.scene.mysql.mysql_authorize_rules import MySQLAuthorizeRules from backend.flow.engine.bamboo.scene.mysql.mysql_checksum import MysqlChecksumFlow @@ -34,6 +35,7 @@ from backend.flow.engine.bamboo.scene.mysql.mysql_migrate_cluster_remote_flow import MySQLMigrateClusterRemoteFlow from backend.flow.engine.bamboo.scene.mysql.mysql_open_area_flow import MysqlOpenAreaFlow from backend.flow.engine.bamboo.scene.mysql.mysql_partition import MysqlPartitionFlow +from backend.flow.engine.bamboo.scene.mysql.mysql_partition_cron import MysqlPartitionCronFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_add import MySQLProxyClusterAddFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_switch import MySQLProxyClusterSwitchFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_upgrade import MySQLProxyLocalUpgradeFlow @@ -335,6 +337,13 @@ def 
mysql_partition(self):
         flow = MysqlPartitionFlow(root_id=self.root_id, data=self.ticket_data)
         flow.mysql_partition_flow()
 
+    def mysql_partition_cron(self):
+        """
+        mysql 表分区定时任务
+        """
+        flow = MysqlPartitionCronFlow(root_id=self.root_id, data=self.ticket_data)
+        flow.mysql_partition_cron_flow()
+
     def mysql_fake_sql_semantic_check_scene(self):
         """
         测试专用,模拟SQL语义检查场景
@@ -626,3 +635,10 @@ def dbconsole_dump_scene(self):
         """
         flow = DbConsoleDumpSqlFlow(root_id=self.root_id, data=self.ticket_data)
         flow.dump_flow()
+
+    def download_file_scene(self):
+        """
+        下载文件
+        """
+        flow = DownloadFileFlow(root_id=self.root_id, data=self.ticket_data)
+        flow.download_file_flow()
diff --git a/dbm-ui/backend/flow/engine/controller/spider.py b/dbm-ui/backend/flow/engine/controller/spider.py
index 42168519e6..81dba38344 100644
--- a/dbm-ui/backend/flow/engine/controller/spider.py
+++ b/dbm-ui/backend/flow/engine/controller/spider.py
@@ -31,6 +31,7 @@ from backend.flow.engine.bamboo.scene.spider.spider_cluster_standardize_flow import SpiderClusterStandardizeFlow
 from backend.flow.engine.bamboo.scene.spider.spider_cluster_truncate_database import SpiderTruncateDatabaseFlow
 from backend.flow.engine.bamboo.scene.spider.spider_partition import SpiderPartitionFlow
+from backend.flow.engine.bamboo.scene.spider.spider_partition_cron import SpiderPartitionCronFlow
 from backend.flow.engine.bamboo.scene.spider.spider_reduce_mnt import TenDBClusterReduceMNTFlow
 from backend.flow.engine.bamboo.scene.spider.spider_reduce_nodes import TenDBClusterReduceNodesFlow
 from backend.flow.engine.bamboo.scene.spider.spider_remotedb_rebalance_flow import TenDBRemoteRebalanceFlow
@@ -72,11 +73,18 @@ def spider_checksum(self):
 
     def spider_partition(self):
         """
-        mysql 表分区
+        spider 表分区
         """
         flow = SpiderPartitionFlow(root_id=self.root_id, data=self.ticket_data)
         flow.spider_partition_flow()
 
+    def spider_partition_cron(self):
+        """
+        spider 表分区定时任务
+        """
+        flow = SpiderPartitionCronFlow(root_id=self.root_id, data=self.ticket_data)
+        flow.spider_partition_cron_flow()
+
     def spider_cluster_disable_scene(self):
         """
         禁用spider集群场景
diff --git a/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_partition_report.py b/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_partition_report.py
new file mode 100644
index 0000000000..07499020f0
--- /dev/null
+++ b/dbm-ui/backend/flow/plugins/components/collections/mysql/mysql_partition_report.py
@@ -0,0 +1,128 @@
+# -*- coding: utf-8 -*-
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+""" +import logging +from collections import defaultdict + +from django.db.transaction import atomic +from django.utils.translation import ugettext as _ +from pipeline.component_framework.component import Component + +from backend.components.mysql_partition.client import DBPartitionApi +from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.ticket.constants import TicketType +from backend.ticket.models import Ticket + +logger = logging.getLogger("flow") + + +class MySQLPartitionReportService(BaseService): + def _execute(self, data, parent_data) -> bool: + global_data = data.get_one_of_inputs("global_data") + trans_data = data.get_one_of_inputs("trans_data") + kwargs = data.get_one_of_inputs("kwargs") + + log_para = [] + fail_list = [] + success_list = [] + configs = defaultdict(list) + if trans_data.partition_report: + if trans_data.partition_report["summaries"]: + summaries = trans_data.partition_report["summaries"] + for item in summaries: + configs[item["config_id"]].append(item) + for config, logs in configs.items(): + log_status = "succeeded" + err_msg = "" + for log in logs: + if log["status"] == "failed": + log_status = "failed" + err_msg = "{};{}".format(err_msg, log["msg"]) + err_msg = err_msg.strip(";") + item = { + "config_id": config, + "check_info": err_msg, + "status": log_status, + "cron_date": global_data["cron_date"], + "scheduler": global_data["created_by"], + } + log_para.append(item) + if log_status == "succeeded": + success_list.append(item) + elif log_status == "failed": + fail_list.append(item) + else: + self.log_error(_("不支持的状态类型: [{}]").format(log_status)) + return False + + if log_para: + if ( + global_data["ticket_type"] == TicketType.MYSQL_PARTITION + or global_data["ticket_type"] == TicketType.MYSQL_PARTITION_CRON + ): + cluster_type = "tendbha" + partition_log_data = { + "cluster_type": cluster_type, + "logs": log_para, + } + try: + DBPartitionApi.create_log(partition_log_data) + except Exception as e: # pylint: disable=broad-except + self.log_error(_("callback分区create_log接口异常: {}").format(e)) + return False + elif ( + global_data["ticket_type"] == TicketType.TENDBCLUSTER_PARTITION + or global_data["ticket_type"] == TicketType.TENDBCLUSTER_PARTITION_CRON + ): + # 原子更新:将校验结果插入ticket信息中,用后后续ticket flow上下文获取 + with atomic(): + if global_data["ticket_type"] == TicketType.TENDBCLUSTER_PARTITION: + ip = kwargs["ip"] + else: + ip = global_data["ip"] + ticket = Ticket.objects.select_for_update().get(id=global_data["uid"]) + flags = ticket.details.get("log_list", {}) + flags.update({ip: log_para}) + ticket.update_details(log_list=flags) + else: + self.log_error(_("不支持的单据类型: [{}]").format(global_data["ticket_type"])) + return False + + self.print_log(fail_list, success_list) + if fail_list and ( + global_data["ticket_type"] == TicketType.MYSQL_PARTITION + or global_data["ticket_type"] == TicketType.MYSQL_PARTITION_CRON + ): + return False + return True + + def print_log(self, fail_list, success_list): + self.log_info(_("ERROR 执行失败的分区规则的数量: {}").format(len(fail_list))) + self.log_info(_("SUCCESS 执行成功的分区规则的数量: {}").format((len(success_list)))) + self.log_dividing_line() + indet_format = " " * 8 + + if fail_list: + self.log_info(_("ERROR 执行失败的分区规则:")) + for item in fail_list: + self.log_info(indet_format + "config id:[{}] msg:[{}]".format(item["config_id"], item["check_info"])) + self.log_dividing_line() + + if success_list: + self.log_info(_("SUCCESS 执行成功的分区规则:")) + for item in success_list: + self.log_info(indet_format + 
"config id:[{}]".format(item["config_id"])) + self.log_dividing_line() + + +class MysqlPartitionReportComponent(Component): + name = __name__ + code = "mysql_partition_report" + bound_service = MySQLPartitionReportService diff --git a/dbm-ui/backend/flow/plugins/components/collections/spider/spider_partition_callback.py b/dbm-ui/backend/flow/plugins/components/collections/spider/spider_partition_callback.py new file mode 100644 index 0000000000..87189b56c2 --- /dev/null +++ b/dbm-ui/backend/flow/plugins/components/collections/spider/spider_partition_callback.py @@ -0,0 +1,79 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +import logging +from collections import defaultdict + +from django.utils.translation import ugettext as _ +from pipeline.component_framework.component import Component + +from backend.components.mysql_partition.client import DBPartitionApi +from backend.flow.plugins.components.collections.common.base_service import BaseService +from backend.flow.plugins.components.collections.mysql.mysql_partition_report import MySQLPartitionReportService +from backend.ticket.models import Ticket + +logger = logging.getLogger("flow") + + +class SpiderPartitionCallbackService(BaseService): + def _execute(self, data, parent_data) -> bool: + global_data = data.get_one_of_inputs("global_data") + configs = defaultdict(list) + log_para = [] + fail_list = [] + success_list = [] + ticket = Ticket.objects.get(id=global_data["uid"]) + flags = ticket.details.get("log_list", {}) + for log_list in flags.values(): + for log in log_list: + configs[log["config_id"]].append(log) + for config, logs in configs.items(): + log_status = "succeeded" + err_msg = "" + for log in logs: + if log["status"] == "failed": + log_status = "failed" + err_msg = "{};{}".format(err_msg, log["check_info"]) + err_msg = err_msg.strip(";") + item = { + "config_id": config, + "check_info": err_msg, + "status": log_status, + "cron_date": global_data["cron_date"], + "scheduler": global_data["created_by"], + } + log_para.append(item) + if log_status == "succeeded": + success_list.append(item) + elif log_status == "failed": + fail_list.append(item) + else: + self.log_error(_("不支持的状态类型: [{}]").format(log_status)) + return False + if log_para: + try: + partition_log_data = { + "cluster_type": "tendbcluster", + "logs": log_para, + } + DBPartitionApi.create_log(partition_log_data) + except Exception as e: # pylint: disable=broad-except + self.log_error(_("callback分区create_log接口异常: {}").format(e)) + return False + MySQLPartitionReportService.print_log(self, fail_list, success_list) + if fail_list: + return False + return True + + +class SpiderPartitionCallbackComponent(Component): + name = __name__ + code = "spider_partition_callback" + bound_service = SpiderPartitionCallbackService diff --git a/dbm-ui/backend/flow/urls.py b/dbm-ui/backend/flow/urls.py index 
f90fa21b8b..729cb888bb 100644 --- a/dbm-ui/backend/flow/urls.py +++ b/dbm-ui/backend/flow/urls.py @@ -30,6 +30,7 @@ from backend.flow.views.doris_scale_up import ScaleUpDorisSceneApiView from backend.flow.views.doris_shrink import ShrinkDorisSceneApiView from backend.flow.views.download_dbactor import DownloadDbactorApiView +from backend.flow.views.download_file import DownloadFileApiView from backend.flow.views.es_apply import InstallEsSceneApiView from backend.flow.views.es_destroy import DestroyEsSceneApiView from backend.flow.views.es_disable import DisableEsSceneApiView @@ -114,6 +115,7 @@ from backend.flow.views.mysql_migrate_cluster_remote import MysqlMigrateRemoteSceneApiView from backend.flow.views.mysql_open_area import MysqlOpenAreaSceneApiView from backend.flow.views.mysql_partition import MysqlPartitionSceneApiView +from backend.flow.views.mysql_partition_cron import MysqlPartitionCronSceneApiView from backend.flow.views.mysql_proxy_add import AddMySQLProxySceneApiView from backend.flow.views.mysql_proxy_switch import SwitchMySQLProxySceneApiView from backend.flow.views.mysql_proxy_upgrade import UpgradeMySQLProxySceneApiView @@ -203,6 +205,7 @@ from backend.flow.views.spider_cluster_rename_database import TenDBClusterRenameDatabaseView from backend.flow.views.spider_cluster_truncate_database import TenDBClusterTruncateDatabaseView from backend.flow.views.spider_partition import SpiderPartitionSceneApiView +from backend.flow.views.spider_partition_cron import SpiderPartitionCronSceneApiView from backend.flow.views.spider_reduce_mnt import ReduceSpiderMNTSceneApiView from backend.flow.views.spider_reduce_nodes import ReduceSpiderNodesSceneApiView from backend.flow.views.spider_semantic_check import SpiderSemanticCheckSceneApiView @@ -331,7 +334,9 @@ url(r"^scene/enable_mysql_single$", EnableMySQLSingleSceneApiView.as_view()), url(r"^scene/mysql_checksum$", MysqlChecksumSceneApiView.as_view()), url(r"^scene/mysql_partition$", MysqlPartitionSceneApiView.as_view()), + url(r"^scene/mysql_partition_cron$", MysqlPartitionCronSceneApiView.as_view()), url(r"^scene/spider_partition$", SpiderPartitionSceneApiView.as_view()), + url(r"^scene/spider_partition_cron$", SpiderPartitionCronSceneApiView.as_view()), url(r"^scene/tendbha_truncate_data$", MySQLHATruncateDataView.as_view()), url(r"^scene/import_sqlfile$", ImportSQLFileSceneApiView.as_view()), url(r"^scene/switch_mysql_proxy$", SwitchMySQLProxySceneApiView.as_view()), @@ -504,4 +509,5 @@ url(r"^scene/replace_doris$", ReplaceDorisSceneApiView.as_view()), url("^scene/mysql_data_migrate$", MysqlDataMigrateSceneApiView.as_view()), url("^scene/download_dbactor$", DownloadDbactorApiView.as_view()), + url("^scene/download_file$", DownloadFileApiView.as_view()), ] diff --git a/dbm-ui/backend/flow/utils/base/payload_handler.py b/dbm-ui/backend/flow/utils/base/payload_handler.py index d9dfccb4a3..4e25509e73 100644 --- a/dbm-ui/backend/flow/utils/base/payload_handler.py +++ b/dbm-ui/backend/flow/utils/base/payload_handler.py @@ -107,6 +107,7 @@ def get_mysql_account(self) -> dict: {"username": UserName.OS_MYSQL.value, "component": MySQLPrivComponent.MYSQL.value}, {"username": UserName.REPL.value, "component": MySQLPrivComponent.MYSQL.value}, {"username": UserName.YW.value, "component": MySQLPrivComponent.MYSQL.value}, + {"username": UserName.PARTITION_YW.value, "component": MySQLPrivComponent.MYSQL.value}, ], } ) diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass.py b/dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass.py index 
f1376eded0..2ab9ca14bf 100644
--- a/dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass.py
+++ b/dbm-ui/backend/flow/utils/mysql/mysql_act_dataclass.py
@@ -428,3 +428,12 @@ class InitCheckForResourceKwargs:
 
     ips: list
     bk_biz_id: int = env.DBA_APP_BK_BIZ_ID
+
+
+@dataclass()
+class IpKwargs:
+    """
+    指定ip 专属变量的dataclass
+    """
+
+    ip: str
diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py
index a32b1d5212..23df54ff03 100644
--- a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py
+++ b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py
@@ -49,7 +49,6 @@ from backend.flow.utils.mysql.proxy_act_payload import ProxyActPayload
 from backend.flow.utils.tbinlogdumper.tbinlogdumper_act_payload import TBinlogDumperActPayload
 from backend.ticket.constants import TicketType
-from backend.ticket.models import Ticket
 
 logger = logging.getLogger("flow")
 
 
@@ -936,27 +935,30 @@ def get_partition_payload(self, **kwargs) -> dict:
         """
         表分区
         """
-        ticket = Ticket.objects.get(id=self.ticket_data["uid"])
-        if self.ticket_data["ticket_type"] == TicketType.MYSQL_PARTITION:
-            shard_name = ""
-        else:
-            shard_name = self.cluster["shard_name"]
         return {
             "db_type": DBActuatorTypeEnum.MySQL.value,
             "action": DBActuatorActionEnum.Partition.value,
             "payload": {
                 "general": {"runtime_account": self.account},
                 "extend": {
-                    "bk_biz_id": self.ticket_data["bk_biz_id"],
-                    "db_app_abbr": self.ticket_data["db_app_abbr"],
-                    "bk_biz_name": self.ticket_data["bk_biz_name"],
-                    "cluster_id": self.ticket_data["cluster_id"],
-                    "immute_domain": self.ticket_data["immute_domain"],
-                    "master_ip": self.cluster["ip"],
-                    "master_port": self.cluster["port"],
-                    "shard_name": shard_name,
-                    "ticket": ticket.url,
-                    "file_path": self.cluster["file_path"],
+                    "ip": self.cluster["ip"],
+                    "file_path": os.path.join(BK_PKG_INSTALL_PATH, "partition", self.cluster["file_path"]),
                 },
             },
         }
+
+    def get_partition_cron_payload(self, **kwargs) -> dict:
+        """
+        表分区定时任务
+        """
+        return {
+            "db_type": DBActuatorTypeEnum.MySQL.value,
+            "action": DBActuatorActionEnum.Partition.value,
+            "payload": {
+                "general": {"runtime_account": self.account},
+                "extend": {
+                    "ip": self.ticket_data["ip"],
+                    "file_path": os.path.join(BK_PKG_INSTALL_PATH, "partition", self.ticket_data["file_name"]),
+                },
+            },
+        }
diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_context_dataclass.py b/dbm-ui/backend/flow/utils/mysql/mysql_context_dataclass.py
index 7f1a221c0e..bafdac3f06 100644
--- a/dbm-ui/backend/flow/utils/mysql/mysql_context_dataclass.py
+++ b/dbm-ui/backend/flow/utils/mysql/mysql_context_dataclass.py
@@ -193,7 +193,7 @@ class MysqlPartitionContext:
     定义checksum的上下文dataclass
     """
 
-    execute_objects: dict = None
+    partition_report: dict = None
 
 
 @dataclass()
diff --git a/dbm-ui/backend/flow/views/download_file.py b/dbm-ui/backend/flow/views/download_file.py
new file mode 100644
index 0000000000..96d712cd39
--- /dev/null
+++ b/dbm-ui/backend/flow/views/download_file.py
@@ -0,0 +1,31 @@
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +import logging + +from rest_framework.response import Response + +from backend.flow.engine.controller.mysql import MySQLController +from backend.flow.views.base import FlowTestView +from backend.utils.basic import generate_root_id + +logger = logging.getLogger("root") + + +class DownloadFileApiView(FlowTestView): + """ + api: /apis/v1/flow/scene/download_file + params: + """ + + def post(self, request): + root_id = generate_root_id() + flow = MySQLController(root_id=root_id, ticket_data=request.data) + flow.download_file_scene() + return Response({"root_id": root_id}) diff --git a/dbm-ui/backend/flow/views/mysql_partition_cron.py b/dbm-ui/backend/flow/views/mysql_partition_cron.py new file mode 100644 index 0000000000..b1209a348b --- /dev/null +++ b/dbm-ui/backend/flow/views/mysql_partition_cron.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from rest_framework.response import Response + +from backend.flow.engine.controller.mysql import MySQLController +from backend.flow.views.base import FlowTestView +from backend.utils.basic import generate_root_id + + +class MysqlPartitionCronSceneApiView(FlowTestView): + def post(self, request): + root_id = generate_root_id() + flow = MySQLController(root_id=root_id, ticket_data=request.data) + flow.mysql_partition_cron() + return Response({"root_id": root_id}) diff --git a/dbm-ui/backend/flow/views/spider_partition_cron.py b/dbm-ui/backend/flow/views/spider_partition_cron.py new file mode 100644 index 0000000000..d8a28797ff --- /dev/null +++ b/dbm-ui/backend/flow/views/spider_partition_cron.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from rest_framework.response import Response + +from backend.flow.engine.controller.spider import SpiderController +from backend.flow.views.base import FlowTestView +from backend.utils.basic import generate_root_id + + +class SpiderPartitionCronSceneApiView(FlowTestView): + def post(self, request): + root_id = generate_root_id() + flow = SpiderController(root_id=root_id, ticket_data=request.data) + flow.spider_partition_cron() + return Response({"root_id": root_id}) diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_partition_cron.py b/dbm-ui/backend/ticket/builders/mysql/mysql_partition_cron.py new file mode 100644 index 0000000000..f15828b560 --- /dev/null +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_partition_cron.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from backend.db_meta.models import AppCache +from backend.flow.engine.controller.mysql import MySQLController +from backend.ticket import builders +from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder, MySQLBaseOperateDetailSerializer +from backend.ticket.constants import TicketType + + +class MysqlPartitionCronDetailSerializer(MySQLBaseOperateDetailSerializer): + class PartitionInfoSerializer(serializers.Serializer): + ip = serializers.CharField(help_text=_("服务器IP")) + bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID")) + file_name = serializers.CharField(help_text=_("分区文件名")) + + infos = serializers.ListSerializer(help_text=_("分区信息"), child=PartitionInfoSerializer()) + + +class MysqlPartitionCronParamBuilder(builders.FlowParamBuilder): + controller = MySQLController.mysql_partition_cron + + def format_ticket_data(self): + app = AppCache.objects.get(bk_biz_id=self.ticket_data["bk_biz_id"]) + self.ticket_data.update(bk_biz_name=app.bk_biz_name, db_app_abbr=app.db_app_abbr) + + +@builders.BuilderFactory.register(TicketType.MYSQL_PARTITION_CRON) +class MysqlPartitionCronFlowBuilder(BaseMySQLTicketFlowBuilder): + serializer = MysqlPartitionCronDetailSerializer + inner_flow_builder = MysqlPartitionCronParamBuilder + inner_flow_name = _("分区管理定时任务执行") + default_need_itsm = False + default_need_manual_confirm = False diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition_cron.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition_cron.py new file mode 100644 index 0000000000..27d563a7a8 --- /dev/null +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition_cron.py @@ -0,0 +1,46 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. 
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" + +from django.utils.translation import gettext_lazy as _ +from rest_framework import serializers + +from backend.db_meta.models import AppCache +from backend.flow.engine.controller.spider import SpiderController +from backend.ticket import builders +from backend.ticket.builders.mysql.base import MySQLBaseOperateDetailSerializer +from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder +from backend.ticket.constants import TicketType + + +class SpiderPartitionCronDetailSerializer(MySQLBaseOperateDetailSerializer): + class PartitionInfoSerializer(serializers.Serializer): + ip = serializers.CharField(help_text=_("服务器IP")) + bk_cloud_id = serializers.IntegerField(help_text=_("云区域ID")) + file_name = serializers.CharField(help_text=_("分区文件名")) + + infos = serializers.ListSerializer(help_text=_("分区信息"), child=PartitionInfoSerializer()) + + +class SpiderPartitionCronParamBuilder(builders.FlowParamBuilder): + controller = SpiderController.spider_partition_cron + + def format_ticket_data(self): + app = AppCache.objects.get(bk_biz_id=self.ticket_data["bk_biz_id"]) + self.ticket_data.update(bk_biz_name=app.bk_biz_name, db_app_abbr=app.db_app_abbr) + + +@builders.BuilderFactory.register(TicketType.TENDBCLUSTER_PARTITION_CRON) +class SpiderPartitionCronFlowBuilder(BaseTendbTicketFlowBuilder): + serializer = SpiderPartitionCronDetailSerializer + inner_flow_builder = SpiderPartitionCronParamBuilder + inner_flow_name = _("分区管理定时任务执行") + default_need_itsm = False + default_need_manual_confirm = False diff --git a/dbm-ui/backend/ticket/constants.py b/dbm-ui/backend/ticket/constants.py index e65f1a3558..ad666d187a 100644 --- a/dbm-ui/backend/ticket/constants.py +++ b/dbm-ui/backend/ticket/constants.py @@ -186,6 +186,7 @@ def get_db_type_by_ticket(cls, ticket_type, raise_exception=False): MYSQL_HA_DB_TABLE_BACKUP = TicketEnumField("MYSQL_HA_DB_TABLE_BACKUP", _("MySQL 高可用库表备份"), _("备份")) MYSQL_CHECKSUM = TicketEnumField("MYSQL_CHECKSUM", _("MySQL 数据校验修复"), _("数据处理")) MYSQL_PARTITION = TicketEnumField("MYSQL_PARTITION", _("MySQL 分区"), _("分区管理")) + MYSQL_PARTITION_CRON = TicketEnumField("MYSQL_PARTITION_CRON", _("MySQL 分区定时任务"), _("分区管理定时任务")) MYSQL_DATA_REPAIR = TicketEnumField("MYSQL_DATA_REPAIR", _("MySQL 数据修复"), register_iam=False) MYSQL_FLASHBACK = TicketEnumField("MYSQL_FLASHBACK", _("MySQL 闪回"), _("回档")) MYSQL_ROLLBACK_CLUSTER = TicketEnumField("MYSQL_ROLLBACK_CLUSTER", _("MySQL 定点构造"), _("回档")) @@ -203,6 +204,8 @@ def get_db_type_by_ticket(cls, ticket_type, raise_exception=False): TENDBCLUSTER_CHECKSUM = TicketEnumField("TENDBCLUSTER_CHECKSUM", _("TenDB Cluster 数据校验修复"), _("数据处理")) TENDBCLUSTER_DATA_REPAIR = TicketEnumField("TENDBCLUSTER_DATA_REPAIR", _("TenDB Cluster 数据修复"), register_iam=False) TENDBCLUSTER_PARTITION = TicketEnumField("TENDBCLUSTER_PARTITION", _("TenDB Cluster 分区管理"), _("分区管理")) + TENDBCLUSTER_PARTITION_CRON = TicketEnumField("TENDBCLUSTER_PARTITION_CRON", _("TenDB Cluster 分区定时任务"), + _("分区管理定时任务")) TENDBCLUSTER_DB_TABLE_BACKUP = 
TicketEnumField("TENDBCLUSTER_DB_TABLE_BACKUP", _("TenDB Cluster 库表备份"), _("备份")) TENDBCLUSTER_RENAME_DATABASE = TicketEnumField("TENDBCLUSTER_RENAME_DATABASE", _("TenDB Cluster 数据库重命名"), _("SQL 任务")) # noqa TENDBCLUSTER_TRUNCATE_DATABASE = TicketEnumField("TENDBCLUSTER_TRUNCATE_DATABASE", _("TenDB Cluster 清档"), _("数据处理")) diff --git a/dbm-ui/config/prod.py b/dbm-ui/config/prod.py index c73b6d6679..8c5837f541 100644 --- a/dbm-ui/config/prod.py +++ b/dbm-ui/config/prod.py @@ -11,7 +11,7 @@ from .default import * # pylint: disable=wildcard-import from corsheaders.defaults import default_headers -DEBUG = False +DEBUG = True LOGGING = get_logging_config(os.path.join(BK_LOG_DIR, APP_CODE), "INFO") diff --git a/dbm-ui/config/stag.py b/dbm-ui/config/stag.py index f281d5ccea..037a60a644 100644 --- a/dbm-ui/config/stag.py +++ b/dbm-ui/config/stag.py @@ -10,7 +10,7 @@ """ from .default import * # pylint: disable=wildcard-import -DEBUG = True +DEBUG = False LOGGING = get_logging_config(os.path.join(BK_LOG_DIR, APP_CODE), "INFO") diff --git a/dbm-ui/poetry.lock b/dbm-ui/poetry.lock index 9ebf104bd0..88e996802d 100644 --- a/dbm-ui/poetry.lock +++ b/dbm-ui/poetry.lock @@ -3606,4 +3606,4 @@ files = [ [metadata] lock-version = "2.0" python-versions = "~3.10.0" -content-hash = "7238bb03799c3d168f56d7bd9cb1d40eac10623bd636d454e3e44f48b5c84a2e" +content-hash = "7238bb03799c3d168f56d7bd9cb1d40eac10623bd636d454e3e44f48b5c84a2e" \ No newline at end of file diff --git a/helm-charts/bk-dbm/Chart.lock b/helm-charts/bk-dbm/Chart.lock index 24bc3287cc..c2df4f02f6 100644 --- a/helm-charts/bk-dbm/Chart.lock +++ b/helm-charts/bk-dbm/Chart.lock @@ -28,7 +28,7 @@ dependencies: version: 0.1.33 - name: dbpartition repository: file://charts/dbpartition - version: 0.1.11 + version: 0.1.13 - name: db-simulation repository: file://charts/db-simulation version: 0.1.11 @@ -56,5 +56,5 @@ dependencies: - name: backup-consumer repository: file://charts/backup-consumer version: 0.0.3 -digest: sha256:a2b8e77575bf8401c4d96083ed6d3cfd3825b1b865ed87e67b6cb04cb2de3487 -generated: "2024-06-12T15:48:36.691617+08:00" +digest: sha256:ae650cea366c4e8836aa05a8031d52dd3f38ef591c929b0f881e9b7e4281ec23 +generated: "2024-06-17T10:56:27.830979+08:00" diff --git a/helm-charts/bk-dbm/Chart.yaml b/helm-charts/bk-dbm/Chart.yaml index 76db97e57f..17f8f8ef23 100644 --- a/helm-charts/bk-dbm/Chart.yaml +++ b/helm-charts/bk-dbm/Chart.yaml @@ -39,7 +39,7 @@ dependencies: - condition: dbpartition.enabled name: dbpartition repository: file://charts/dbpartition - version: 0.1.11 + version: 0.1.13 - condition: db-simulation.enabled name: db-simulation repository: file://charts/db-simulation diff --git a/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml b/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml index 95af589cf8..dd032753b1 100644 --- a/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml +++ b/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 0.0.1-alpha.182 +appVersion: 0.0.1-alpha.209 name: dbpartition description: A Helm chart for db partition service type: application -version: 0.1.11 +version: 0.1.13 diff --git a/helm-charts/bk-dbm/templates/configmaps/dbpartition-configmap.yaml b/helm-charts/bk-dbm/templates/configmaps/dbpartition-configmap.yaml index cdbe86b19b..4a7af8b63f 100644 --- a/helm-charts/bk-dbm/templates/configmaps/dbpartition-configmap.yaml +++ b/helm-charts/bk-dbm/templates/configmaps/dbpartition-configmap.yaml @@ -22,6 +22,8 @@ data: DB_REMOTE_SERVICE: "{{ 
.Values.dbpartition.envs.DB_REMOTE_SERVICE }}" CRON_RETRY_HOUR: "{{ .Values.dbpartition.envs.CRON_RETRY_HOUR }}" CRON_TIMING_HOUR: "{{ .Values.dbpartition.envs.CRON_TIMING_HOUR }}" + ALARM_TIMEZONE: "{{ .Values.dbpartition.envs.ALARM_TIMEZONE }}" + ALARM_HOUR: "{{ .Values.dbpartition.envs.ALARM_HOUR }}" DBM_TICKET_SERVICE: "{{ .Values.dbpartition.envs.DBM_TICKET_SERVICE }}" LISTEN_ADDRESS: "{{ .Values.dbpartition.envs.LISTEN_ADDRESS }}" DB_META_SERVICE: "{{ .Values.dbpartition.envs.DB_META_SERVICE }}" @@ -36,4 +38,10 @@ data: REDIS_HOST: "{{ .Values.externalRedis.host }}" REDIS_PORT: "{{ .Values.externalRedis.port }}" REDIS_PASSWORD: "{{ .Values.externalRedis.password }}" + # BKREPO + BKREPO_PROJECT: "{{ .Values.bk.bkRepoProject }}" + BKREPO_PUBLIC_BUCKET: "{{ .Values.bk.bkRepoPublicBucket }}" + BKREPO_USERNAME: "{{ .Values.bk.bkRepoUsername }}" + BKREPO_PASSWORD: "{{ .Values.bk.bkRepoPassword }}" + BKREPO_ENDPOINT_URL: "{{ .Values.bk.bkRepoUrl }}" {{- end }} \ No newline at end of file diff --git a/helm-charts/bk-dbm/values.yaml b/helm-charts/bk-dbm/values.yaml index 018d4cf89e..857cd85049 100644 --- a/helm-charts/bk-dbm/values.yaml +++ b/helm-charts/bk-dbm/values.yaml @@ -229,6 +229,8 @@ dbpartition: DB_REMOTE_SERVICE: "http://bk-dbm/apis/proxypass/drs/" CRON_RETRY_HOUR: "9,15" CRON_TIMING_HOUR: "3" + ALARM_TIMEZONE: "UTC+8" + ALARM_HOUR: "11" DBM_TICKET_SERVICE: "http://bk-dbm/apis/" LISTEN_ADDRESS: "0.0.0.0:80" DB_META_SERVICE: "http://bk-dbm" From f0af3e3f2304154e4f9172fd922190d0e04415d9 Mon Sep 17 00:00:00 2001 From: hlinx <327159425@qq.com> Date: Tue, 18 Jun 2024 10:00:50 +0800 Subject: [PATCH 02/29] =?UTF-8?q?fix(frontend):=20=E5=AF=B9=E5=A4=96?= =?UTF-8?q?=E7=89=88=E6=9D=83=E9=99=90=E6=8F=90=E7=A4=BA=20#5000=20#=20Rev?= =?UTF-8?q?iewed,=20transaction=20id:=209972?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../src/components/apply-permission/render-result.vue | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/dbm-ui/frontend/src/components/apply-permission/render-result.vue b/dbm-ui/frontend/src/components/apply-permission/render-result.vue index 8549854836..326dc9f0f9 100644 --- a/dbm-ui/frontend/src/components/apply-permission/render-result.vue +++ b/dbm-ui/frontend/src/components/apply-permission/render-result.vue @@ -19,6 +19,9 @@ src="/images/no-permission.svg" />

{{ t('该操作需要以下权限') }} + + {{ t(',请联系运维人员开通权限') }} +

@@ -62,11 +65,15 @@ import type ApplyDataModel from '@services/model/iam/apply-data'; + import { useSystemEnviron } from '@stores'; + interface Props { data: ApplyDataModel; } defineProps(); + const { t } = useI18n(); + const { urls } = useSystemEnviron(); From 177a20a133d1bed767f77b9982eefa8463b87953 Mon Sep 17 00:00:00 2001 From: yyhenryyy Date: Fri, 17 May 2024 14:48:49 +0800 Subject: [PATCH 11/29] =?UTF-8?q?feat(mongodb):=20=E5=85=BC=E5=AE=B9percon?= =?UTF-8?q?a=20mongodb=EF=BC=8C=E6=89=A7=E8=A1=8C=E8=84=9A=E6=9C=AC?= =?UTF-8?q?=E6=B7=BB=E5=8A=A0dns=E8=A7=A3=E6=9E=90=20#4478?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../dbactuator/pkg/jobruntime/jobruntime.go | 2 +- .../db_meta/api/cluster/mongorepset/create.py | 2 +- .../management/commands/sync_from_bkrepo.py | 1 + .../engine/bamboo/scene/common/get_file_list.py | 12 +++++------- .../bamboo/scene/mongodb/mongodb_exec_script.py | 1 + .../bamboo/scene/mongodb/mongodb_install.py | 5 ++++- .../bamboo/scene/mongodb/mongodb_install_dbmon.py | 3 ++- .../bamboo/scene/mongodb/sub_task/exec_script.py | 15 +++++++++++++++ dbm-ui/backend/flow/utils/mongodb/mongodb_repo.py | 14 +++++++++++--- 9 files changed, 41 insertions(+), 14 deletions(-) diff --git a/dbm-services/mongodb/db-tools/dbactuator/pkg/jobruntime/jobruntime.go b/dbm-services/mongodb/db-tools/dbactuator/pkg/jobruntime/jobruntime.go index fb4526d086..bd7b7a12a0 100644 --- a/dbm-services/mongodb/db-tools/dbactuator/pkg/jobruntime/jobruntime.go +++ b/dbm-services/mongodb/db-tools/dbactuator/pkg/jobruntime/jobruntime.go @@ -78,7 +78,7 @@ func NewJobGenericRuntime(uid, rootID string, // SetLogger set logger func (r *JobGenericRuntime) SetLogger() { var err error - logFile := fmt.Sprintf("redis_actuator_%s_%s.log", r.UID, r.NodeID) + logFile := fmt.Sprintf("mongo_actuator_%s_%s.log", r.UID, r.NodeID) err = util.MkDirsIfNotExists([]string{logDir}) if err != nil { panic(err) diff --git a/dbm-ui/backend/db_meta/api/cluster/mongorepset/create.py b/dbm-ui/backend/db_meta/api/cluster/mongorepset/create.py index e43a9ea0e7..254d79d15c 100644 --- a/dbm-ui/backend/db_meta/api/cluster/mongorepset/create.py +++ b/dbm-ui/backend/db_meta/api/cluster/mongorepset/create.py @@ -169,4 +169,4 @@ def create_mongoset( logger.error(traceback.format_exc()) raise Exception("mongoset add dns entry failed {}".format(e)) - MongoDBCCTopoOperator(cluster).transfer_instances_to_cluster_module(storage_objs) + MongoDBCCTopoOperator(cluster).transfer_instances_to_cluster_module(storage_objs, is_increment=True) diff --git a/dbm-ui/backend/dbm_init/management/commands/sync_from_bkrepo.py b/dbm-ui/backend/dbm_init/management/commands/sync_from_bkrepo.py index c51c8d2c85..b8c3735618 100644 --- a/dbm-ui/backend/dbm_init/management/commands/sync_from_bkrepo.py +++ b/dbm-ui/backend/dbm_init/management/commands/sync_from_bkrepo.py @@ -54,6 +54,7 @@ def handle(self, *args, **options): "size": media["size"], "md5": media["md5"], "create_at": create_at, + "update_at": media["lastModifiedDate"], }, db_type=db_type, pkg_type=pkg_type["name"], diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py b/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py index 5c0b3b4e34..2dca11c759 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/common/get_file_list.py @@ -613,20 +613,18 @@ def get_sqlserver_package(self, db_version: str): 
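Note on the hunk that follows: it fixes bkrepo artifact paths that previously concatenated the bucket and the package path with no separator. A minimal sketch of the failure mode and one safer join, assuming purely illustrative values (BKREPO_PROJECT, BKREPO_BUCKET and pkg_path below are made-up examples, not the real env settings):

import posixpath

BKREPO_PROJECT = "bk-dbm"  # assumed example value
BKREPO_BUCKET = "dbm-public"  # assumed example value
pkg_path = "mongodb/install/mongodb-4.2.tar.gz"  # assumed example value

# Missing "/" fuses the bucket and the path into one segment:
broken = f"{BKREPO_PROJECT}/{BKREPO_BUCKET}{pkg_path}"
# -> "bk-dbm/dbm-publicmongodb/install/mongodb-4.2.tar.gz"

fixed = f"{BKREPO_PROJECT}/{BKREPO_BUCKET}/{pkg_path}"
joined = posixpath.join(BKREPO_PROJECT, BKREPO_BUCKET, pkg_path)
assert fixed == joined
# Caveat: posixpath.join discards the earlier parts if pkg_path starts
# with "/", so strip a leading slash first when the stored path may carry one.
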
f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{self.actuator_pkg.path}", ] - def mongodb_pkg(self, db_version: str) -> list: + def mongodb_pkg(self, db_version: str, release_info: str) -> list: """ 部署mongodb,需要的pkg包 """ - mongodb_pkg = Package.get_latest_package( - version=db_version, pkg_type=MediumEnum.MongoDB, db_type=DBType.MongoDB - ) + mongodb_pkg = Package.get_latest_package(version=db_version, pkg_type=release_info, db_type=DBType.MongoDB) # bkdbmon_pkg = Package.get_latest_package( # version=MediumEnum.Latest, pkg_type=MediumEnum.DbMon, db_type=DBType.MongoDB # ) return [ - f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}{self.actuator_pkg.path}", - f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}{mongodb_pkg.path}", + f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{self.actuator_pkg.path}", + f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{mongodb_pkg.path}", # f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{bkdbmon_pkg.path}", ] @@ -636,5 +634,5 @@ def mongodb_actuator_pkg(self) -> list: """ return [ - f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}{self.actuator_pkg.path}", + f"{env.BKREPO_PROJECT}/{env.BKREPO_BUCKET}/{self.actuator_pkg.path}", ] diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_exec_script.py b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_exec_script.py index cb15efa0c3..9386c2cd2a 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_exec_script.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_exec_script.py @@ -32,6 +32,7 @@ def __init__(self, root_id: str, data: Optional[Dict]): self.data = data self.get_kwargs = ActKwargs() self.get_kwargs.payload = data + self.get_kwargs.root_id = root_id def multi_cluster_exec_script_flow(self): """ diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install.py b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install.py index 9231e22bd2..6eb08c4853 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install.py @@ -119,7 +119,7 @@ def multi_replicaset_install_flow(self): kwargs=kwargs, ) - # dbmon + # # 安装dbmon # self.install_dbmon(data=self.data, pipeline=pipeline) # 运行流程 pipeline.run_pipeline() @@ -210,5 +210,8 @@ def cluster_install_flow(self): kwargs=kwargs, ) + # 安装dbmon + # self.install_dbmon(data=self.data, pipeline=pipeline) + # 运行流程 pipeline.run_pipeline() diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install_dbmon.py b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install_dbmon.py index 0cef1e0513..101a7a536f 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install_dbmon.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/mongodb_install_dbmon.py @@ -53,7 +53,8 @@ def add_install_dbmon(flow, flow_data, pipeline, iplist, bk_cloud_id, allow_empt """ allow_empty_instance 上架流程中,允许ip没有实例. 
allow_empty_instance = True """ - actuator_workdir = ActKwargs().get_mongodb_os_conf()["file_path"] + + actuator_workdir = flow.get_kwargs.file_path pkg_info = get_pkg_info() file_list = [ "{}/{}/{}".format(env.BKREPO_PROJECT, env.BKREPO_BUCKET, pkg_info.get("actuator_pkg").path), diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/sub_task/exec_script.py b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/sub_task/exec_script.py index c20f472d25..dc724ff2a9 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/sub_task/exec_script.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mongodb/sub_task/exec_script.py @@ -15,10 +15,13 @@ from django.utils.translation import ugettext as _ from backend.flow.consts import MongoDBManagerUser +from backend.flow.engine.bamboo.scene.common.atom_jobs.set_dns_sub_job import set_dns_atom_job from backend.flow.engine.bamboo.scene.common.builder import SubBuilder from backend.flow.plugins.components.collections.mongodb.exec_actuator_job import ExecuteDBActuatorJobComponent from backend.flow.plugins.components.collections.mongodb.send_media import ExecSendMediaOperationComponent +from backend.flow.utils.common_act_dataclass import DNSContext from backend.flow.utils.mongodb.mongodb_dataclass import ActKwargs +from backend.flow.utils.redis.redis_context_dataclass import ActKwargs as RedisActKwargs def exec_script(root_id: str, ticket_data: Optional[Dict], sub_kwargs: ActKwargs, cluster_id: int) -> SubBuilder: @@ -42,6 +45,18 @@ def exec_script(root_id: str, ticket_data: Optional[Dict], sub_kwargs: ActKwargs act_name=_("MongoDB-介质下发"), act_component_code=ExecSendMediaOperationComponent.code, kwargs=kwargs ) + # 设置dns解析 + redis_actkwargs = RedisActKwargs() + redis_actkwargs.cluster = {} + redis_actkwargs.set_trans_data_dataclass = DNSContext.__name__ + redis_actkwargs.is_update_trans_data = True + redis_actkwargs.bk_cloud_id = sub_get_kwargs.payload["bk_cloud_id"] + kwargs = sub_get_kwargs.get_set_dns_resolv_kwargs() + sub_sub_pipeline = set_dns_atom_job( + root_id=sub_get_kwargs.root_id, ticket_data=sub_get_kwargs.payload, act_kwargs=redis_actkwargs, param=kwargs + ) + sub_pipeline.add_sub_pipeline(sub_sub_pipeline) + # 创建原子任务执行目录 kwargs = sub_get_kwargs.get_create_dir_kwargs() sub_pipeline.add_act( diff --git a/dbm-ui/backend/flow/utils/mongodb/mongodb_repo.py b/dbm-ui/backend/flow/utils/mongodb/mongodb_repo.py index 252542929e..32b6e9c2d8 100644 --- a/dbm-ui/backend/flow/utils/mongodb/mongodb_repo.py +++ b/dbm-ui/backend/flow/utils/mongodb/mongodb_repo.py @@ -98,6 +98,7 @@ def __init__( bk_biz_id: int = None, immute_domain: str = None, app: str = None, + region: str = None, ): self.cluster_id = cluster_id self.name = name @@ -107,6 +108,7 @@ def __init__( self.immute_domain = immute_domain self.bk_cloud_id = bk_cloud_id self.app = app + self.region = region @abstractmethod def get_shards(self): @@ -136,6 +138,7 @@ def __init__( bk_biz_id=None, immute_domain=None, app: str = None, + region: str = None, shard: ReplicaSet = None, ): super().__init__( @@ -147,6 +150,7 @@ def __init__( bk_biz_id, immute_domain, app, + region, ) self.shard = shard @@ -176,6 +180,7 @@ def __init__( bk_biz_id=None, immute_domain=None, app: str = None, + region: str = None, shards: List[ReplicaSet] = None, mongos: List[MongoNode] = None, configsvr: ReplicaSet = None, @@ -189,6 +194,7 @@ def __init__( bk_biz_id, immute_domain, app, + region, ) self.shards = shards self.mongos = mongos @@ -232,6 +238,7 @@ def fetch_many_cluster(cls, set_get_domain: bool, **kwargs): 
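The surrounding hunks thread a new optional region attribute through the cluster constructors and into the rows built below, so existing call sites keep working while new callers can pass the field. A minimal sketch of that pattern with simplified stand-in classes (ClusterBase and ShardedCluster are illustrative names, not the project's real types):

from typing import List, Optional

class ClusterBase:
    def __init__(self, cluster_id: int, region: Optional[str] = None):
        self.cluster_id = cluster_id
        self.region = region  # new field; stays None when callers do not pass it

class ShardedCluster(ClusterBase):
    def __init__(self, cluster_id: int, shards: Optional[List[str]] = None, region: Optional[str] = None):
        super().__init__(cluster_id, region)
        self.shards = shards or []

old_caller = ShardedCluster(1, ["s0", "s1"])  # unchanged call site still works
new_caller = ShardedCluster(2, ["s0"], region="gz")  # region carried to the base class
assert old_caller.region is None and new_caller.region == "gz"
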
immute_domain=i.immute_domain, app=None, # app和bk_biz_id是1-1的关系,有一个就够了 shard=shard, + region=i.region, ) rows.append(row) @@ -263,6 +270,7 @@ def fetch_many_cluster(cls, set_get_domain: bool, **kwargs): mongos=mongos, shards=shards, configsvr=configsvr, + region=i.region, ) rows.append(row) @@ -277,8 +285,8 @@ def fetch_one_cluster(cls, set_get_domain: bool, **kwargs): return None @classmethod - def fetch_many_cluster_dict(cls, **kwargs): - clusters = cls.fetch_many_cluster(**kwargs) + def fetch_many_cluster_dict(cls, set_get_domain: bool, **kwargs): + clusters = cls.fetch_many_cluster(set_get_domain, **kwargs) clusters_map = {} for cluster in clusters: clusters_map[cluster.cluster_id] = cluster @@ -481,7 +489,7 @@ def from_hosts(iplist: List, bk_cloud_id: int) -> List: if not cluster_id_list: return instance_list - clusters = MongoRepository.fetch_many_cluster_dict(id__in=cluster_id_list) + clusters = MongoRepository.fetch_many_cluster_dict(set_get_domain=False, id__in=cluster_id_list) for cluster_id in clusters: cluster = clusters[cluster_id] for rs in cluster.get_shards(): From 5d470bd07facbb0d8f92d70cb854777fe35a7f89 Mon Sep 17 00:00:00 2001 From: ymakedaq <996156275@qq.com> Date: Fri, 10 May 2024 09:30:19 +0800 Subject: [PATCH 12/29] feat(backend): complete tendbha storage upgrade flow #4307 --- dbm-services/go.work.sum | 3 - .../scene/mysql/mysql_migrate_cluster_flow.py | 19 +- .../mysql_migrate_cluster_remote_flow.py | 11 +- .../bamboo/scene/mysql/mysql_proxy_upgrade.py | 39 ++- .../bamboo/scene/mysql/mysql_upgrade.py | 317 +++++++++++++++--- .../backend/flow/engine/controller/mysql.py | 13 +- .../backend/flow/utils/mysql/mysql_db_meta.py | 9 + 7 files changed, 342 insertions(+), 69 deletions(-) diff --git a/dbm-services/go.work.sum b/dbm-services/go.work.sum index ed12a15810..07c560c9d4 100644 --- a/dbm-services/go.work.sum +++ b/dbm-services/go.work.sum @@ -1352,9 +1352,6 @@ golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2 h1:IRJeR9r1pYWsHKTRe/IInb7lYvbBVIqOgsX/u0mbOWY= golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE= golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= -golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py index a51bbef0ff..8688c43711 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py @@ -68,7 +68,7 @@ def __init__(self, root_id: str, ticket_data: Optional[Dict]): self.ticket_data = ticket_data self.data = {} - def deploy_migrate_cluster_flow(self): + def deploy_migrate_cluster_flow(self, use_for_upgrade=False): """ 成对迁移集群主从节点。 增加单据临时ADMIN账号的添加和删除逻辑 @@ -98,6 +98,12 @@ def deploy_migrate_cluster_flow(self): slave = cluster_class.storageinstance_set.filter( 
instance_inner_role=InstanceInnerRole.SLAVE.value, is_stand_by=True ).first() + + # 如果是升级用途的话,需要改变module id + db_module_id = cluster_class.db_module_id + if use_for_upgrade: + db_module_id = self.data["new_db_module_id"] + self.data["master_ip"] = master_model.machine.ip self.data["cluster_type"] = cluster_class.cluster_type self.data["old_slave_ip"] = slave.machine.ip @@ -105,15 +111,22 @@ def deploy_migrate_cluster_flow(self): self.data["mysql_port"] = master_model.port self.data["bk_biz_id"] = cluster_class.bk_biz_id self.data["bk_cloud_id"] = cluster_class.bk_cloud_id - self.data["db_module_id"] = cluster_class.db_module_id + self.data["db_module_id"] = db_module_id self.data["time_zone"] = cluster_class.time_zone self.data["created_by"] = self.ticket_data["created_by"] - self.data["module"] = cluster_class.db_module_id + self.data["module"] = db_module_id self.data["ticket_type"] = self.ticket_data["ticket_type"] self.data["uid"] = self.ticket_data["uid"] self.data["package"] = Package.get_latest_package( version=cluster_class.major_version, pkg_type=MediumEnum.MySQL, db_type=DBType.MySQL ).name + + if use_for_upgrade: + new_major_version = self.data["new_mysql_version"] + self.data["package"] = Package.get_latest_package( + version=new_major_version, pkg_type=MediumEnum.MySQL, db_type=DBType.MySQL + ).name + self.data["ports"] = get_ports(info["cluster_ids"]) self.data["force"] = info.get("force", False) self.data["charset"], self.data["db_version"] = get_version_and_charset( diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py index cd1cf340b2..878cbe6f89 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py @@ -72,7 +72,7 @@ def __init__(self, root_id: str, ticket_data: Optional[Dict]): # 定义备份文件存放到目标机器目录位置 self.backup_target_path = f"/data/dbbak/{self.root_id}" - def migrate_cluster_flow(self): + def migrate_cluster_flow(self, use_for_upgrade=False): """ 成对迁移集群主从节点。 元数据信息修改顺序: @@ -101,6 +101,11 @@ def migrate_cluster_flow(self): slave = cluster_class.storageinstance_set.filter( instance_inner_role=InstanceInnerRole.SLAVE.value, is_stand_by=True ).first() + # 如果是升级用途的话,需要改变module id + db_module_id = cluster_class.db_module_id + if use_for_upgrade: + db_module_id = self.data["new_db_module_id"] + self.data["master_ip"] = master_model.machine.ip self.data["cluster_type"] = cluster_class.cluster_type self.data["old_slave_ip"] = slave.machine.ip @@ -108,10 +113,10 @@ def migrate_cluster_flow(self): self.data["mysql_port"] = master_model.port self.data["bk_biz_id"] = cluster_class.bk_biz_id self.data["bk_cloud_id"] = cluster_class.bk_cloud_id - self.data["db_module_id"] = cluster_class.db_module_id + self.data["db_module_id"] = db_module_id self.data["time_zone"] = cluster_class.time_zone self.data["created_by"] = self.ticket_data["created_by"] - self.data["module"] = cluster_class.db_module_id + self.data["module"] = db_module_id self.data["ticket_type"] = self.ticket_data["ticket_type"] self.data["uid"] = self.ticket_data["uid"] self.data["package"] = Package.get_latest_package( diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_upgrade.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_upgrade.py index 245671ea19..8216f17e28 100644 --- 
a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_upgrade.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_proxy_upgrade.py
@@ -17,7 +17,7 @@
 
 from backend.configuration.constants import DBType
 from backend.db_meta.exceptions import DBMetaException
-from backend.db_meta.models import ProxyInstance
+from backend.db_meta.models import Cluster, ProxyInstance
 from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder
 from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList
 from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent
@@ -34,6 +34,16 @@
 class MySQLProxyLocalUpgradeFlow(object):
     """
     mysql proxy 本地升级场景
+    {
+        bk_biz_id: 0,
+        bk_cloud_id: 0,
+        infos:[
+            {
+            cluster_ids:[],
+            new_proxy_version:"",
+            }
+        ]
+    }
     """
 
     def __init__(self, root_id: str, data: Optional[Dict]):
@@ -44,7 +54,9 @@ def __init__(self, root_id: str, data: Optional[Dict]):
         self.root_id = root_id
         self.data = data
         self.uid = data["uid"]
+        self.bk_cloud_id = data["bk_cloud_id"]
         self.new_proxy_version = data["new_proxy_version"]
+        self.upgrade_cluster_list = data["infos"]
         self.new_proxxy_version_num = proxy_version_parse(self.new_proxy_version)
 
     def __get_proxy_instance_by_host(self, proxy_ip: str) -> list:
@@ -55,15 +67,18 @@ def upgrade_mysql_proxy_flow(self):
         proxy_upgrade_pipeline = Builder(root_id=self.root_id, data=self.data)
         sub_pipelines = []
         # 声明子流程
-        for proxy in self.data["proxy_ip_list"]:
-            logger.info(_("wait upgrade proxy detail {}".format(proxy["ip"])))
+        for upgrade_info in self.upgrade_cluster_list:
+            cluster_ids = upgrade_info["cluster_ids"]
+            clusters = Cluster.objects.filter(id__in=cluster_ids)
+            proxies = ProxyInstance.objects.filter(cluster__in=clusters)
+            if len(proxies) <= 0:
+                raise DBMetaException(message=_("根据cluster ids:{}无法找到对应的proxy实例").format(cluster_ids))
+
             sub_flow_context = copy.deepcopy(self.data)
-            sub_flow_context.pop("proxy_ip_list")
             sub_pipeline = SubBuilder(root_id=self.root_id, data=copy.deepcopy(sub_flow_context))
             proxy_ports = []
-            proxies = self.__get_proxy_instance_by_host(proxy["ip"])
-            if len(proxies) <= 0:
-                raise DBMetaException(message=_("根据proxy ip {} 无法找到对应的实例记录").format(proxy["ip"]))
+            proxy_ip_list = []
+
             for proxy_instance in proxies:
                 current_version = proxy_version_parse(proxy_instance.version)
                 if current_version >= self.new_proxxy_version_num:
@@ -74,11 +89,17 @@ def upgrade_mysql_proxy_flow(self):
                     )
                     raise DBMetaException(message=_("待升级版本大于等于新版本,请确认升级的版本"))
                 proxy_ports.append(proxy_instance.port)
+                proxy_ip_list.append(proxy_instance.machine.ip)
+
+            if len(list(set(proxy_ip_list))) != 1:
+                raise DBMetaException(message=_("集群关联的proxy实例必须归属同一台主机,请确认"))
+
+            proxy_ip = proxy_ip_list[0]
 
             sub_pipeline.add_sub_pipeline(
                 sub_flow=self.upgrade_mysql_proxy_subflow(
-                    bk_cloud_id=proxy["bk_cloud_id"],
-                    ip=proxy["ip"],
+                    bk_cloud_id=self.bk_cloud_id,
+                    ip=proxy_ip,
                     proxy_ports=proxy_ports,
                 )
             )
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_upgrade.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_upgrade.py
index ba045a9cb8..731a6a0215 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_upgrade.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_upgrade.py
@@ -16,26 +16,110 @@
 from django.utils.translation import ugettext as _
 
 from backend.configuration.constants import MYSQL8_VER_PARSE_NUM, DBType
+from backend.db_meta.enums import ClusterType, InstanceInnerRole, InstanceRole, MachineType
 from backend.db_meta.exceptions import DBMetaException
-from backend.db_meta.models import StorageInstance
+from backend.db_meta.models import Cluster, StorageInstance
 from backend.db_package.models import Package
 from backend.flow.consts import MediumEnum
 from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder
 from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList
+from backend.flow.engine.bamboo.scene.mysql.common.master_and_slave_switch import master_and_slave_switch
+from backend.flow.engine.bamboo.scene.mysql.mysql_migrate_cluster_flow import MySQLMigrateClusterFlow
+from backend.flow.engine.bamboo.scene.mysql.mysql_migrate_cluster_remote_flow import MySQLMigrateClusterRemoteFlow
+from backend.flow.plugins.components.collections.common.pause import PauseComponent
 from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent
 from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent
 from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent
+from backend.flow.utils.mysql.common.mysql_cluster_info import get_version_and_charset
 from backend.flow.utils.mysql.mysql_act_dataclass import DBMetaOPKwargs, DownloadMediaKwargs, ExecActuatorKwargs
 from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload
 from backend.flow.utils.mysql.mysql_db_meta import MySQLDBMeta
 from backend.flow.utils.mysql.mysql_version_parse import mysql_version_parse
+from backend.ticket.builders.common.constants import MySQLBackupSource
 
 logger = logging.getLogger("flow")
 
 
-class MySQLLocalUpgradeFlow(object):
+def upgrade_version_check(origin_ver: str, new_ver: str):
+    new_version_num = mysql_version_parse(new_ver)
+    original_vernum = mysql_version_parse(origin_ver)
+    if new_version_num > MYSQL8_VER_PARSE_NUM:
+        new_version_num = convert_mysql8_version_num(new_version_num)
+    if new_version_num // 1000 - original_vernum // 1000 > 1:
+        logger.error("upgrades across multiple major versions are not allowed")
+        raise DBMetaException(message=_("不允许跨多个大版本升级"))
+    if original_vernum >= new_version_num:
+        logger.error(
+            "the upgrade version {} needs to be larger than the current version {}".format(
+                new_version_num, original_vernum
+            )
+        )
+        raise DBMetaException(message=_("当前集群MySQL版本大于等于升级目标版本,请确认"))
+
+
+def convert_mysql8_version_num(verno: int) -> int:
+    # MySQL的发行版本号并不连续 MySQL 5.5 5.6 5.7 8.0
+    # 为了方便比较将8.0 转换成 parse 之后的5.8的版本号来做比较
+    # 例如 8.0.28 parse 后为 8000028,转换后为 5008028,可与 5.7.44(parse 为 5007044)直接比较
+    return 5008 * 1000 + verno % 1000
+
+
+# MySQLMigrateClusterRemoteFlow: 使用远程备份来恢复
+# MySQLMigrateClusterFlow: 使用本地备份来恢复
+class MySQMigrateUpgradeFlow(MySQLMigrateClusterRemoteFlow, MySQLMigrateClusterFlow):
+    """
+    构建mysql主从成对迁移的方式来升级MySQL
+    """
+
+    def mysql_upgrade_by_migrate_flow(self):
+        # 进行模块的版本检查
+        self.__pre_check()
+        # 根据选择的备份类型来选择不同的流程执行迁移升级
+        if self.data["backup_source"] == MySQLBackupSource.LOCAL:
+            # 使用本地备份来做迁移
+            self.deploy_migrate_cluster_flow(use_for_upgrade=True)
+        elif self.data["backup_source"] == MySQLBackupSource.REMOTE:
+            # 使用远程备份来做迁移
+            self.migrate_cluster_flow(use_for_upgrade=True)
+
+    def __pre_check(self):
+        for info in self.ticket_data["infos"]:
+            self.data = copy.deepcopy(info)
+            cluster_class = Cluster.objects.get(id=self.data["cluster_ids"][0])
+            origin_charset, origin_mysql_ver = get_version_and_charset(
+                self.data["bk_biz_id"],
+                db_module_id=cluster_class.db_module_id,
+                cluster_type=cluster_class.cluster_type,
+            )
+            new_charset, new_mysql_ver = get_version_and_charset(
+                self.data["bk_biz_id"],
db_module_id=self.data["new_db_module_id"], + cluster_type=cluster_class.cluster_type, + ) + if new_charset != origin_chaset: + raise DBMetaException( + message=_("{}升级前后字符集不一致,原字符集:{},新模块的字符集{}").format( + cluster_class.immute_domain, origin_chaset, new_charset + ) + ) + upgrade_version_check(origin_mysql_ver, new_mysql_ver) + + +class MySQLStorageLocalUpgradeFlow(object): """ - mysql 本地升级场景,只允许升级slave实例 + MySQL集群原地升级,先升级从库,在进行主从切换,在升级 + { + bk_biz_id: 0, + bk_cloud_id: 0, + infos:[ + { + cluster_ids:[], + cluster_type:"", + new_mysql_version:"", + new_module_id:"" + } + ] + } """ def __init__(self, root_id: str, data: Optional[Dict]): @@ -46,64 +129,210 @@ def __init__(self, root_id: str, data: Optional[Dict]): self.root_id = root_id self.data = data self.uid = data["uid"] + self.upgrade_cluster_list = data["infos"] self.new_mysql_version = data["new_mysql_version"] self.new_version_num = mysql_version_parse(self.__get_pkg_name_by_version(self.new_mysql_version)) if self.new_version_num > MYSQL8_VER_PARSE_NUM: - self.new_version_num = self.__convert_mysql8_version_num(self.new_version_num) + self.new_version_num = convert_mysql8_version_num(self.new_version_num) - def __get_mysql_instance_by_host(self, ip: str) -> list: - return StorageInstance.objects.filter(machine__ip=ip) + def __the_clusters_use_same_machine(self, cluster_ids: list): + clusters = Cluster.objects.filter(id__in=cluster_ids) + instances = StorageInstance.objects.filter(cluster__in=clusters, machine_type=MachineType.BACKEND) + mach_ip_list = [] + for ins in instances: + mach_ip_list.append(ins.machine.ip) + # 根据主机再去查询关联的实例 + # relation_cluster_ids 是根据主机反查得到的关联的集群cluster_ids + relation_cluster_ids = [] + for ip in mach_ip_list: + mach_rela_instances = StorageInstance.objects.filter(machine__ip=ip) + for ins in mach_rela_instances: + relation_cluster = ins.cluster.get() + relation_cluster_ids.append(relation_cluster.id) + # 求差集 + diff_ids = set(cluster_ids) - set(relation_cluster_ids) + if len(diff_ids) > 0: + raise DBMetaException(message=_("当前集群,请确认")) + diff_ids = set(relation_cluster_ids) - set(cluster_ids) + if len(diff_ids) > 0: + raise DBMetaException(message=_("必须把主机关联的集群都选上,请确认")) + + def __get_clusters_slave_instance(self, cluster_ids: list): + clusters = Cluster.objects.filter(id__in=cluster_ids) + instances = StorageInstance.objects.filter( + cluster__in=clusters, + machine_type=MachineType.BACKEND, + instance_role=InstanceRole.BACKEND_SLAVE, + is_stand_by=True, + ) + return instances + + def __get_clusters_master_instance(self, cluster_ids: list): + clusters = Cluster.objects.filter(id__in=cluster_ids) + instances = StorageInstance.objects.filter( + cluster__in=clusters, machine_type=MachineType.BACKEND, instance_role=InstanceRole.BACKEND_MASTER + ) + return instances + + def __get_tendbsingle_instance(self, cluster_ids: list): + clusters = Cluster.objects.filter(id__in=cluster_ids) + instances = StorageInstance.objects.filter( + cluster__in=clusters, machine_type=MachineType.BACKEND, instance_role=InstanceRole.ORPHAN + ) + return instances def __get_pkg_name_by_version(self, version: str) -> str: # 获取大版本的最新的包名 mysql_pkg = Package.get_latest_package(version=version, pkg_type=MediumEnum.MySQL, db_type=DBType.MySQL) return mysql_pkg.name - def __convert_mysql8_version_num(self, verno: int) -> int: - # MySQL的发行版本号并不连续 MySQL 5.5 5.6 5.7 8.0 - # 为了方便比较将8.0 装换成 parse 之后的5.8的版本号来做比较 - return 5008 * 1000 + verno % 1000 - def upgrade_mysql_flow(self): - proxy_upgrade_pipeline = Builder(root_id=self.root_id, 
+        mysql_upgrade_pipeline = Builder(root_id=self.root_id, data=self.data)
         sub_pipelines = []
         cluster_ids = []
         # 声明子流程
-        for mysql in self.data["mysql_ip_list"]:
+        for upgrade_info in self.upgrade_cluster_list:
             sub_flow_context = copy.deepcopy(self.data)
-            sub_flow_context.pop("mysql_ip_list")
-            ports = []
-            mysql_instances = self.__get_mysql_instance_by_host(mysql["ip"])
-            if len(mysql_instances) <= 0:
-                raise DBMetaException(message=_("根据mysql 机器ip {} 无法找到对应的实例记录").format(mysql["ip"]))
-            for mysql_instance in mysql_instances:
-                cluster_ids.append(mysql_instance.cluster.get().id)
-                ports.append(mysql_instance.port)
-                current_version = mysql_version_parse(mysql_instance.version)
-                if current_version >= self.new_version_num:
-                    logger.error(
-                        "the upgrade version {} needs to be larger than the current verion {}".format(
-                            self.new_version_num, current_version
-                        )
-                    )
-                    raise DBMetaException(message=_("待升级版本大于等于新版本,请确认升级的版本"))
-                if self.new_version_num // 1000 - current_version // 1000 > 1:
-                    logger.error("upgrades across multiple major versions are not allowed")
-                    raise DBMetaException(message=_("不允许跨多个大版本升级"))
+            sub_flow_context.pop("infos")
+            cluster_ids = upgrade_info["cluster_ids"]
             sub_pipeline = SubBuilder(
                 root_id=self.root_id, data=copy.deepcopy(sub_flow_context), need_random_pass_cluster_ids=cluster_ids
             )
-            sub_pipeline.add_sub_pipeline(
-                sub_flow=self.upgrade_mysql_subflow(
-                    bk_cloud_id=mysql["bk_cloud_id"],
-                    ip=mysql["ip"],
-                    proxy_ports=ports,
+
+            if upgrade_info["cluster_type"] == ClusterType.TenDBHA:
+
+                slave_instances = self.__get_clusters_slave_instance(cluster_ids)
+                if len(slave_instances) <= 0:
+                    raise DBMetaException(message=_("无法找到对应的从实例记录"))
+
+                master_instances = self.__get_clusters_master_instance(cluster_ids)
+
+                master_ip_list = []
+                for master_instance in master_instances:
+                    master_ip_list.append(master_instance.machine.ip)
+
+                if len(list(set(master_ip_list))) != 1:
+                    raise DBMetaException(message=_("集群的master应该同属于一台机器,当前分布在{}").format(list(set(master_ip_list))))
+
+                master_ip = master_ip_list[0]
+
+                port_map = {}
+                for slave_instance in slave_instances:
+                    port_map.setdefault(slave_instance.machine.ip, []).append(slave_instance.port)
+                    upgrade_version_check(slave_instance.version, self.new_mysql_version)
+
+                for slave_ip, ports in port_map.items():
+                    sub_pipeline.add_sub_pipeline(
+                        sub_flow=self.upgrade_mysql_subflow(
+                            bk_cloud_id=self.data["bk_cloud_id"],
+                            ip=slave_ip,
+                            proxy_ports=ports,
+                        )
+                    )
+
+                # 切换子流程
+                switch_sub_pipeline_list = []
+                for cluster_id in cluster_ids:
+                    switch_sub_pipeline = SubBuilder(root_id=self.root_id, data=copy.deepcopy(self.data))
+                    cluster_model = Cluster.objects.get(id=cluster_id)
+                    master_model = cluster_model.storageinstance_set.get(
+                        instance_inner_role=InstanceInnerRole.MASTER.value
+                    )
+
+                    standby_slave = cluster_model.storageinstance_set.get(
+                        instance_inner_role=InstanceInnerRole.SLAVE.value, is_stand_by=True
+                    )
+
+                    slave_ip = standby_slave.machine.ip
+
+                    other_slave_storage = cluster_model.storageinstance_set.filter(
+                        instance_inner_role=InstanceInnerRole.SLAVE.value, is_stand_by=False
+                    )
+
+                    other_slaves = [y.machine.ip for y in other_slave_storage]
+                    cluster = {
+                        "cluster_id": cluster_model.id,
+                        "bk_cloud_id": cluster_model.bk_cloud_id,
+                        "old_master_ip": master_ip,
+                        "old_master_port": master_model.port,
+                        "old_slave_ip": slave_ip,
+                        "old_slave_port": standby_slave.port,
+                        "new_master_ip": slave_ip,
+                        "new_master_port": standby_slave.port,
+                        "new_slave_ip": master_ip,
+                        "new_slave_port": master_model.port,
+                        "mysql_port": master_model.port,
"master_port": master_model.port, + "other_slave_info": other_slaves, + } + switch_sub_pipeline.add_sub_pipeline( + sub_flow=master_and_slave_switch( + root_id=self.root_id, + ticket_data=copy.deepcopy(self.data), + cluster=cluster_model, + cluster_info=copy.deepcopy(cluster), + ) + ) + switch_sub_pipeline.add_act( + act_name=_("集群切换完成,写入 {} 的元信息".format(cluster_model.id)), + act_component_code=MySQLDBMetaComponent.code, + kwargs=asdict( + DBMetaOPKwargs( + db_meta_class_func=MySQLDBMeta.mysql_migrate_cluster_switch_storage.__name__, + cluster=cluster, + is_update_trans_data=True, + ) + ), + ) + + switch_sub_pipeline_list.append( + switch_sub_pipeline.build_sub_process(sub_name=_("集群 {} 切换".format(cluster_model.id))) ) - ) - sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("本地升级MySQL版本"))) - proxy_upgrade_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) - proxy_upgrade_pipeline.run_pipeline(is_drop_random_user=True) + + sub_pipeline.add_act(act_name=_("人工确认切换"), act_component_code=PauseComponent.code, kwargs={}) + sub_pipeline.add_parallel_sub_pipeline(sub_flow_list=switch_sub_pipeline_list) + + # origin master 升级 + port_map = {} + for instance in master_instances: + port_map[instance.machine.ip].append(instance.port) + upgrade_version_check(instance.version, self.new_mysql_version) + + for slaveIp, ports in port_map: + sub_pipeline.add_sub_pipeline( + sub_flow=self.upgrade_mysql_subflow( + bk_cloud_id=self.data["bk_cloud_id"], + ip=slaveIp, + proxy_ports=ports, + ) + ) + + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("[TendbHa]本地升级MySQL版本"))) + + # tendbsingle 本地升级流程 + elif upgrade_info["cluster_type"] == ClusterType.TenDBSingle: + instances = self.__get_tendbsingle_instance(cluster_ids) + ipList = [] + ports = [] + for instance in instances: + ports.append(instance.port) + ipList.append(instance.machine.ip) + upgrade_version_check(instance.version, self.new_mysql_version) + + sub_pipeline.add_sub_pipeline( + sub_flow=self.upgrade_mysql_subflow( + bk_cloud_id=self.data["bk_cloud_id"], + ip=ipList[0], + proxy_ports=ports, + ) + ) + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("[TendbSingle]本地升级MySQL版本"))) + + mysql_upgrade_pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + + mysql_upgrade_pipeline.run_pipeline(is_drop_random_user=True) + return def upgrade_mysql_subflow( @@ -154,13 +383,13 @@ def upgrade_mysql_subflow( ) # 更新proxy instance version 信息 sub_pipeline.add_act( - act_name=_("更新proxy version meta信息"), + act_name=_("更新mysql instance version meta信息"), act_component_code=MySQLDBMetaComponent.code, kwargs=asdict( DBMetaOPKwargs( - db_meta_class_func=MySQLDBMeta.update_proxy_instance_version.__name__, - cluster={"proxy_ip": ip, "version": self.new_version_num}, + db_meta_class_func=MySQLDBMeta.update_mysql_instance_version.__name__, + cluster={"ip": ip, "version": self.new_version_num}, ) ), ) - return sub_pipeline.build_sub_process(sub_name=_("proxy实例升级")) + return sub_pipeline.build_sub_process(sub_name=_("MySQL实例升级")) diff --git a/dbm-ui/backend/flow/engine/controller/mysql.py b/dbm-ui/backend/flow/engine/controller/mysql.py index 711bc59612..8352594973 100644 --- a/dbm-ui/backend/flow/engine/controller/mysql.py +++ b/dbm-ui/backend/flow/engine/controller/mysql.py @@ -49,7 +49,7 @@ from backend.flow.engine.bamboo.scene.mysql.mysql_single_disable_flow import MySQLSingleDisableFlow from backend.flow.engine.bamboo.scene.mysql.mysql_single_enable_flow import MySQLSingleEnableFlow from 
backend.flow.engine.bamboo.scene.mysql.mysql_truncate_flow import MySQLTruncateFlow -from backend.flow.engine.bamboo.scene.mysql.mysql_upgrade import MySQLLocalUpgradeFlow +from backend.flow.engine.bamboo.scene.mysql.mysql_upgrade import MySQLStorageLocalUpgradeFlow from backend.flow.engine.bamboo.scene.mysql.pt_table_sync import PtTableSyncFlow from backend.flow.engine.controller.base import BaseController @@ -592,25 +592,24 @@ def mysql_upgrade_scene(self): """ mysql实例本地升级场景(新flow编排) ticket_data 参数结构体样例 + 必须选择关联主机的所有集群 { "uid": "2022051612120001", "created_by": "xxx", "bk_biz_id": "152", "ticket_type": "MYSQL_UPGRADE", "new_mysql_version": "MySQL-5.7", - "mysql_ip_list":[ + "infos":[ { - "bk_cloud_id: 0, - "ip":"1.1.1.1", + "cluster_ids":[1001,1002,1003], } { - "bk_cloud_id: 0, - "ip":"2.2.2.2", + "cluster_ids":[2001,2002,2003], } ] } """ - flow = MySQLLocalUpgradeFlow(root_id=self.root_id, data=self.ticket_data) + flow = MySQLStorageLocalUpgradeFlow(root_id=self.root_id, data=self.ticket_data) flow.upgrade_mysql_flow() def mysql_data_migrate_scene(self): diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py index ca263e0f58..5cd0656ea0 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py @@ -934,6 +934,15 @@ def update_proxy_instance_version(self): machine__ip=self.cluster["proxy_ip"], ).update(version=self.cluster["version"]) + def update_mysql_instance_version(self): + """ + 升级后更新mysql版本信息 + """ + with atomic(): + StorageInstance.objects.filter( + machine__ip=self.cluster["ip"], + ).update(version=self.cluster["version"]) + def update_machine_system_info(self): """ 更新machine system info From 54fefb85e8a179e91e7ad3df9d1c2ed6a865c0ac Mon Sep 17 00:00:00 2001 From: durant <826035498@qq.com> Date: Tue, 18 Jun 2024 15:46:01 +0800 Subject: [PATCH 13/29] =?UTF-8?q?feat(backend):=20=E7=BB=9F=E4=B8=80?= =?UTF-8?q?=E9=9A=90=E8=97=8F=E8=BF=94=E5=9B=9E=E7=9A=84=20IP=20#5007?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dbm-ui/backend/bk_web/constants.py | 3 +++ dbm-ui/backend/bk_web/middleware.py | 12 +++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/dbm-ui/backend/bk_web/constants.py b/dbm-ui/backend/bk_web/constants.py index 72fa60df11..9bd5770607 100644 --- a/dbm-ui/backend/bk_web/constants.py +++ b/dbm-ui/backend/bk_web/constants.py @@ -8,6 +8,7 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" +import re # 常规字段长度定义 from blue_krill.data_types.enum import EnumField, StructuredEnum @@ -19,6 +20,8 @@ LEN_X_LONG = 1000 LEN_XX_LONG = 10000 +IP_RE = re.compile(r"\b(?:\d{1,3}\.){3}\d{1,3}\b") + # 字段默认值 EMPTY_INT = 0 EMPTY_STRING = "" diff --git a/dbm-ui/backend/bk_web/middleware.py b/dbm-ui/backend/bk_web/middleware.py index 784b94c643..e9dd5223c1 100644 --- a/dbm-ui/backend/bk_web/middleware.py +++ b/dbm-ui/backend/bk_web/middleware.py @@ -24,7 +24,7 @@ from django.utils.translation import ugettext as _ from backend import env -from backend.bk_web.constants import NON_EXTERNAL_PROXY_ROUTING, ROUTING_WHITELIST_PATTERNS +from backend.bk_web.constants import IP_RE, NON_EXTERNAL_PROXY_ROUTING, ROUTING_WHITELIST_PATTERNS from backend.bk_web.exceptions import ExternalProxyBaseException, ExternalRouteInvalidException from backend.bk_web.handlers import _error from backend.ticket.constants import TicketType @@ -151,6 +151,7 @@ def __check_action_permission_is_none(self, request): def __check_specific_request_params(self, request): """校验特殊接口的参数是否满足要求""" + # 单据创建校验函数 def check_create_ticket(): data = json.loads(request.body.decode("utf-8")) @@ -214,6 +215,15 @@ def __call__(self, request): request.is_external = str2bool(request.headers.get("IS-EXTERNAL", ""), strict=False) return self.get_response(request) + def process_response(self, request, response): + if env.ENABLE_EXTERNAL_PROXY: + response.content = self.replace_ip(response.content.decode("utf-8")).encode("utf-8") + return response + + @staticmethod + def replace_ip(self, text): + return re.sub(IP_RE, "*.*.*.*", text) + class ExternalUserModelBackend(UserModelBackend): """ From 32d19c8fb09c4f5b98559d194db3e9f82b5bf736 Mon Sep 17 00:00:00 2001 From: durant <826035498@qq.com> Date: Tue, 18 Jun 2024 16:02:27 +0800 Subject: [PATCH 14/29] chore(other): release #5018 --- helm-charts/bk-dbm/Chart.lock | 6 +++--- helm-charts/bk-dbm/Chart.yaml | 6 +++--- helm-charts/bk-dbm/charts/dbm/Chart.yaml | 2 +- helm-charts/bk-dbm/charts/dbpartition/Chart.yaml | 4 ++-- helm-charts/bk-dbm/charts/dbpriv/Chart.yaml | 2 +- 5 files changed, 10 insertions(+), 10 deletions(-) diff --git a/helm-charts/bk-dbm/Chart.lock b/helm-charts/bk-dbm/Chart.lock index c2df4f02f6..215b7b4b50 100644 --- a/helm-charts/bk-dbm/Chart.lock +++ b/helm-charts/bk-dbm/Chart.lock @@ -28,7 +28,7 @@ dependencies: version: 0.1.33 - name: dbpartition repository: file://charts/dbpartition - version: 0.1.13 + version: 0.1.14 - name: db-simulation repository: file://charts/db-simulation version: 0.1.11 @@ -56,5 +56,5 @@ dependencies: - name: backup-consumer repository: file://charts/backup-consumer version: 0.0.3 -digest: sha256:ae650cea366c4e8836aa05a8031d52dd3f38ef591c929b0f881e9b7e4281ec23 -generated: "2024-06-17T10:56:27.830979+08:00" +digest: sha256:e56a681e3ebe67f7b7f6920622586a25a5f7dcfb600c5c162bfd3cbe1dbb4a17 +generated: "2024-06-18T16:01:58.028796+08:00" diff --git a/helm-charts/bk-dbm/Chart.yaml b/helm-charts/bk-dbm/Chart.yaml index 17f8f8ef23..4227dcd930 100644 --- a/helm-charts/bk-dbm/Chart.yaml +++ b/helm-charts/bk-dbm/Chart.yaml @@ -39,7 +39,7 @@ dependencies: - condition: dbpartition.enabled name: dbpartition repository: file://charts/dbpartition - version: 0.1.13 + version: 0.1.14 - condition: db-simulation.enabled name: db-simulation repository: file://charts/db-simulation @@ -79,5 +79,5 @@ dependencies: description: A Helm chart for bk-dbm name: bk-dbm type: application -version: 1.4.0-alpha.58 -appVersion: 1.4.0-alpha.58 +version: 1.4.0-alpha.59 +appVersion: 1.4.0-alpha.59 
diff --git a/helm-charts/bk-dbm/charts/dbm/Chart.yaml b/helm-charts/bk-dbm/charts/dbm/Chart.yaml index 0e4ddcfc2c..555ea11b0d 100644 --- a/helm-charts/bk-dbm/charts/dbm/Chart.yaml +++ b/helm-charts/bk-dbm/charts/dbm/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 1.4.0-alpha.919 +appVersion: 1.4.0-alpha.945 description: A Helm chart for dbm name: dbm type: application diff --git a/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml b/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml index dd032753b1..3c80de3632 100644 --- a/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml +++ b/helm-charts/bk-dbm/charts/dbpartition/Chart.yaml @@ -1,6 +1,6 @@ apiVersion: v2 -appVersion: 0.0.1-alpha.209 +appVersion: 0.0.1-alpha.210 name: dbpartition description: A Helm chart for db partition service type: application -version: 0.1.13 +version: 0.1.14 diff --git a/helm-charts/bk-dbm/charts/dbpriv/Chart.yaml b/helm-charts/bk-dbm/charts/dbpriv/Chart.yaml index 6380232978..cd12f80ad0 100644 --- a/helm-charts/bk-dbm/charts/dbpriv/Chart.yaml +++ b/helm-charts/bk-dbm/charts/dbpriv/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -appVersion: 0.0.1-alpha.326 +appVersion: 0.0.1-alpha.337 name: dbpriv description: A Helm chart for dbpriv type: application From 17f6cbe1fdf5a8fa64754dc527cb67a53761dcf3 Mon Sep 17 00:00:00 2001 From: xfwduke Date: Tue, 18 Jun 2024 16:47:45 +0800 Subject: [PATCH 15/29] =?UTF-8?q?fix(mysql):=20=E9=9B=86=E7=BE=A4=E5=B1=8F?= =?UTF-8?q?=E8=94=BDdbha=E7=8A=B6=E6=80=81=E5=86=99=E5=8F=8D=E4=BA=86=20#5?= =?UTF-8?q?027?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- dbm-ui/backend/db_meta/models/cluster.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dbm-ui/backend/db_meta/models/cluster.py b/dbm-ui/backend/db_meta/models/cluster.py index 81b672c55d..94a14eec04 100644 --- a/dbm-ui/backend/db_meta/models/cluster.py +++ b/dbm-ui/backend/db_meta/models/cluster.py @@ -334,7 +334,7 @@ def get_cluster_stats(cls, cluster_types) -> dict: def is_dbha_disabled(self) -> bool: try: - return self.clusterdbhaext.end_time < datetime.now(timezone.utc) + return self.clusterdbhaext.end_time >= datetime.now(timezone.utc) except ObjectDoesNotExist: return False From 2682214a9a377f8f2850a9c5e95d9d826a5b8a63 Mon Sep 17 00:00:00 2001 From: 3octaves <873551943@qq.com> Date: Tue, 18 Jun 2024 16:59:15 +0800 Subject: [PATCH 16/29] =?UTF-8?q?feat(frontend):=20sqlserver=E9=83=A8?= =?UTF-8?q?=E7=BD=B2=E5=8F=82=E6=95=B0=E5=8F=98=E6=9B=B4=20#5022?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../SqlServerCreateDbModule.vue | 4 +- .../sqlserver-manage/apply/SqlServer.vue | 24 ++-- .../apply/components/PreviewTable.vue | 117 ++++++++---------- .../components/demand-factory/common/types.ts | 14 ++- .../demand-factory/sqlserver/Details.vue | 81 ++++++------ 5 files changed, 125 insertions(+), 115 deletions(-) diff --git a/dbm-ui/frontend/src/views/service-apply/create-db-module/SqlServerCreateDbModule.vue b/dbm-ui/frontend/src/views/service-apply/create-db-module/SqlServerCreateDbModule.vue index 3110838e0a..05d1eabd21 100644 --- a/dbm-ui/frontend/src/views/service-apply/create-db-module/SqlServerCreateDbModule.vue +++ b/dbm-ui/frontend/src/views/service-apply/create-db-module/SqlServerCreateDbModule.vue @@ -205,7 +205,7 @@ const haModeList = [ { - value: 'image', + value: 'mirroring', label: t('镜像'), }, { @@ -347,7 +347,7 @@ if (Number(version.slice(-4)) > 2017) { formData.haMode = 'alwaysOn'; } else { - 
formData.haMode = 'image'; + formData.haMode = 'mirroring'; } } }, { immediate: true }); diff --git a/dbm-ui/frontend/src/views/sqlserver-manage/apply/SqlServer.vue b/dbm-ui/frontend/src/views/sqlserver-manage/apply/SqlServer.vue index 3168d54eb8..3a91b89df2 100644 --- a/dbm-ui/frontend/src/views/sqlserver-manage/apply/SqlServer.vue +++ b/dbm-ui/frontend/src/views/sqlserver-manage/apply/SqlServer.vue @@ -278,7 +278,7 @@ :data="previewData" :is-show-nodes="formData.details.ip_source === 'manual_input'" :is-single-type="isSingleType" - :nodes="previewNodes" /> + :node-list="previewNodes" /> + }>({ [ClusterTypes.TENDBCLUSTER]: [] }); const formData = reactive({ + backup_source: 'local', need_checksum: false, trigger_checksum_type: 'timer', trigger_checksum_time: dayjs().format('YYYY-MM-DD HH:mm:ss'), + remark: '', }); // 集群域名是否已存在表格的映射表 @@ -199,13 +236,23 @@ } }; + // 复制行数据 + const handleClone = (index: number, sourceData: IDataRow) => { + const dataList = [...tableData.value]; + dataList.splice(index + 1, 0, sourceData); + tableData.value = dataList; + setTimeout(() => { + rowRefs.value[rowRefs.value.length - 1].getValue(); + }); + }; + const handleSubmit = () => { isSubmitting.value = true; Promise.all(rowRefs.value.map((item: { getValue: () => Promise }) => item.getValue())) .then((data) => createTicket({ - ticket_type: 'TENDBCLUSTER_NODE_REBALANCE', - remark: '', + ticket_type: TicketTypes.TENDBCLUSTER_NODE_REBALANCE, + remark: formData.remark, details: { ...formData, infos: data, diff --git a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderCluster.vue b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderCluster.vue index 01fddc51d0..c6b0fcfa63 100644 --- a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderCluster.vue +++ b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderCluster.vue @@ -136,6 +136,12 @@ bk_cloud_id: modelValue.value!.bkCloudId, cluster_shard_num: modelValue.value!.clusterShardNum, db_module_id: modelValue.value!.dbModuleId, + })) + .catch(() => Promise.reject({ + cluster_id: modelValue.value?.id, + bk_cloud_id: modelValue.value?.bkCloudId, + cluster_shard_num: modelValue.value?.clusterShardNum, + db_module_id: modelValue.value?.dbModuleId, })); }, }); diff --git a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderTargetResourceSpec.vue b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderTargetResourceSpec.vue index c92cfb092d..cea8a6aabe 100644 --- a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderTargetResourceSpec.vue +++ b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/RenderTargetResourceSpec.vue @@ -79,24 +79,29 @@ diff --git a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/Row.vue b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/Row.vue index 3dcdfd350a..622eb118c9 100644 --- a/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/Row.vue +++ b/dbm-ui/frontend/src/views/spider-manage/capacity-change/pages/page1/components/RenderData/Row.vue @@ -42,11 +42,13 @@ + :cluster-data="localClusterData" + :row-data="data" /> @@ -78,7 +80,9 @@ backend_group: { spec_id: number; count: number; - 
affinity: ''; + affinity: string; + futureCapacity: number; + specName: string; }; }; } @@ -108,6 +112,7 @@ interface Emits { (e: 'add', params: Array): void; (e: 'remove'): void; + (e: 'clone', value: IDataRow): void; } interface Exposes { @@ -150,16 +155,34 @@ emits('remove'); }; + const getRowData = () => [ + clusterRef.value!.getValue(), + resourceSpecRef.value!.getValue(), + targetResourceSpecRef.value!.getValue(), + ]; + + const handleClone = () => { + Promise.allSettled(getRowData()).then((rowData) => { + const [clusterData, resourceSpecData, targetResourceSpecData] = rowData.map((item) => + item.status === 'fulfilled' ? item.value : item.reason, + ); + emits( + 'clone', + createRowData({ + clusterData: localClusterData.value, + resourceSpec: { + id: 0, + name: localClusterData.value?.clusterSpec.spec_name ?? '', + }, + resource_spec: targetResourceSpecData.resource_spec, + }), + ); + }); + }; + defineExpose({ getValue() { - return Promise.all([ - clusterRef.value!.getValue(), - targetResourceSpecRef.value!.getValue(), - shardNumRef.value!.getValue(), - machinePairCntRef.value!.getValue(), - capacityRef.value!.getValue(), - resourceSpecRef.value!.getValue(), - ]).then(([clusterData, targetResourceSpecData]) => ({ + return Promise.all(getRowData()).then(([clusterData, resourceSpecData, targetResourceSpecData]) => ({ ...clusterData, ...targetResourceSpecData, })); diff --git a/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/Index.vue b/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/Index.vue index 7b2e49d229..dad9c35e71 100644 --- a/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/Index.vue @@ -20,7 +20,7 @@ :title="t('数据校验修复:对集群的主库和从库进行数据一致性校验和修复,其中 MyISAM 引擎库表不会被校验和修复')" /> + >([createRowData({})]); - const selectedClusters = shallowRef<{[key: string]: Array}>({ [ClusterTypes.TENDBCLUSTER]: [] }); + const selectedClusters = shallowRef<{ [key: string]: Array }>({ [ClusterTypes.TENDBCLUSTER]: [] }); // 集群域名是否已存在表格的映射表 let domainMemo: Record = {}; @@ -189,15 +203,21 @@ isShowBatchSelector.value = true; }; - const handleBatchEditScope = (value: string) => { + const handleBatchEditColumn = (value: string | string[], filed: IDataRowBatchKey) => { if (!value || checkListEmpty(tableData.value)) { return; } - tableData.value.forEach((row) => { - Object.assign(row, { - scope: value, + if (filed === 'scope') { + tableData.value.forEach((row) => { + Object.assign(row, { + [filed]: value, + }); }); - }); + } else { + rowRefs.value.map((item: { setLocalBackupInfos: (value: string[], field: IDataRowBatchKey) => void }) => + item.setLocalBackupInfos(value as string[], filed), + ); + } }; // 批量选择 @@ -247,11 +267,11 @@ const handleSubmit = () => { isSubmitting.value = true; - Promise.all(rowRefs.value.map((item: { getValue: () => Promise }) => item.getValue())) + Promise.all(rowRefs.value!.map((item: { getValue: () => Promise }) => item.getValue())) .then((data) => createTicket({ - ticket_type: 'TENDBCLUSTER_CHECKSUM', - remark: '', + ticket_type: TicketTypes.TENDBCLUSTER_CHECKSUM, + remark: formData.remark, details: { ...formData, timing: formatDateToUTC(dayjs(formData.timing).format('YYYY-MM-DD HH:mm:ss')), diff --git a/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/components/RenderData/Index.vue b/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/components/RenderData/Index.vue index ad618442e1..d67775e135 100644 --- 
a/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/components/RenderData/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/checksum/pages/page1/components/RenderData/Index.vue @@ -32,14 +32,14 @@ :width="110"> @@ -113,6 +114,10 @@ }[]; } + type BackupInfoKeys = keyof Omit; + type RowKeys = keyof Pick; + export type IDataRowBatchKey = RowKeys | BackupInfoKeys; + const createBackupInfo = (data = {} as Partial) => ({ master: data.master || '', slave: data.slave || '', @@ -153,6 +158,7 @@ } interface Exposes { + setLocalBackupInfos: (value: string[], field: BackupInfoKeys) => void; getValue: () => Promise; } @@ -179,13 +185,23 @@ if (props.data.clusterData) { localClusterId.value = props.data.clusterData.id; } - if (props.data.scope) { - localScope.value = props.data.scope; - } if (props.data.backupInfos) { localBackupInfos.value = props.data.backupInfos; } }, + { + deep: true, + immediate: true, + }, + ); + + watch( + () => props.data.scope, + () => { + if (props.data.scope) { + localScope.value = props.data.scope; + } + }, { immediate: true, }, @@ -238,6 +254,13 @@ }; defineExpose({ + setLocalBackupInfos(value, field) { + localBackupInfos.value.forEach((backInfo) => { + Object.assign(backInfo, { + [field]: value, + }); + }); + }, getValue() { return Promise.all([ Promise.all(clusterRefs.value.map((item: any) => item.getValue())), diff --git a/dbm-ui/frontend/src/views/spider-manage/common/edit-field/ClusterName.vue b/dbm-ui/frontend/src/views/spider-manage/common/edit-field/ClusterName.vue index ebb7b018d2..e7f2b8b765 100644 --- a/dbm-ui/frontend/src/views/spider-manage/common/edit-field/ClusterName.vue +++ b/dbm-ui/frontend/src/views/spider-manage/common/edit-field/ClusterName.vue @@ -137,7 +137,8 @@ getValue() { return editRef.value .getValue() - .then(() => (localValue.value)); + .then(() => (localValue.value)) + .catch(() => Promise.reject(localValue)) }, }); diff --git a/dbm-ui/frontend/src/views/spider-manage/common/edit/DateTime.vue b/dbm-ui/frontend/src/views/spider-manage/common/edit/DateTime.vue index cda04cd919..e2706f40a9 100644 --- a/dbm-ui/frontend/src/views/spider-manage/common/edit/DateTime.vue +++ b/dbm-ui/frontend/src/views/spider-manage/common/edit/DateTime.vue @@ -75,6 +75,13 @@ const { message: errorMessage, validator } = useValidtor(props.rules); + watch( + () => props.modelValue, + () => { + localValue.value = props.modelValue; + }, + ); + const handleChange = (value: Required['modelValue']) => { localValue.value = value; validator(localValue.value).then(() => { diff --git a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/Index.vue b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/Index.vue index 767f3ebd4b..75a95baf55 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/Index.vue @@ -28,11 +28,13 @@ :data="item" :removeable="tableData.length < 2" @add="(payload: Array) => handleAppend(index, payload)" + @clone="(payload: IDataRow) => handleClone(index, payload)" @input-cluster-finish="(item: IDataRow) => handleInputCluster(index, item)" @remove="() => handleRemove(index)" /> @@ -62,6 +64,7 @@ + ({ backup_type: 'logical', file_tag: 'MYSQL_FULL_BACKUP', + remark: '', }); const { t } = useI18n(); const router = useRouter(); const { currentBizId } = useGlobalBizs(); + // 单据克隆 + useTicketCloneInfo({ + type: TicketTypes.TENDBCLUSTER_FULL_BACKUP, + onSuccess(cloneData) { + const { tableDataList, form } = cloneData; + 
Object.assign(formData, form); + tableData.value = tableDataList; + window.changeConfirm = true; + }, + }); + const formRef = ref(); const rowRefs = ref(); const isShowBatchSelector = ref(false); @@ -204,13 +222,32 @@ } }; + // 复制行数据 + const handleClone = (index: number, sourceData: IDataRow) => { + const dataList = [...tableData.value]; + dataList.splice( + index + 1, + 0, + Object.assign(sourceData, { + clusterData: { + ...sourceData.clusterData, + domain: tableData.value[index].clusterData?.domain ?? '', + }, + }), + ); + tableData.value = dataList; + setTimeout(() => { + rowRefs.value[rowRefs.value.length - 1].getValue(); + }); + }; + const handleSubmit = () => { Promise.all(rowRefs.value.map((item: { getValue: () => Promise }) => item.getValue())).then((data) => { isSubmitting.value = true; createTicket({ bk_biz_id: currentBizId, - ticket_type: 'TENDBCLUSTER_FULL_BACKUP', - remark: '', + ticket_type: TicketTypes.TENDBCLUSTER_FULL_BACKUP, + remark: formData.remark, details: { infos: { ...formData, @@ -247,10 +284,5 @@ diff --git a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Index.vue b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Index.vue index b29a557bf0..8ca0d4020d 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Index.vue @@ -35,11 +35,11 @@ v-model="isShowBatchEdit" :data-list="selectList" :title="t('备份位置')" - @change="handleBatchEdit"> + @change="handleBatchEditChange"> + @click="handleBatchEditChangeShow"> @@ -67,9 +67,9 @@ import RenderTableHeadColumn from '@components/render-table/HeadColumn.vue'; import RenderTable from '@components/render-table/Index.vue'; - interface Emits{ - (e: 'batchSelectCluster'): void, - (e: 'batchEditBackupLocal', value: string): void, + interface Emits { + (e: 'batchSelectCluster'): void; + (e: 'batchEditBackupLocal', value: string): void; } const emits = defineEmits(); @@ -89,12 +89,12 @@ }, ]; - const handleShowBatchEdit = () => { + const handleBatchEditChangeShow = () => { isShowBatchEdit.value = !isShowBatchEdit.value; }; - const handleBatchEdit = (value: string) => { - emits('batchEditBackupLocal', value); + const handleBatchEditChange = (value: string | string[]) => { + emits('batchEditBackupLocal', value as string); }; const handleShowBatchSelector = () => { diff --git a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderBackupLocal.vue b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderBackupLocal.vue index 98a0d63c18..3736085da5 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderBackupLocal.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderBackupLocal.vue @@ -112,9 +112,16 @@ defineExpose({ getValue() { - return editSelectRef.value.getValue().then(() => ({ - backup_local: localValue.value, - })); + return editSelectRef.value + .getValue() + .then(() => ({ + backup_local: localValue.value, + })) + .catch(() => + Promise.reject({ + backup_local: localValue.value, + }), + ); }, }); diff --git a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderCluster.vue b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderCluster.vue index 5ab8e52a2d..63c12d6a7e 100644 --- 
a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderCluster.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/RenderCluster.vue @@ -159,7 +159,10 @@ .getValue() .then(() => ({ cluster_id: localClusterId.value, - })); + })) + .catch(() => Promise.reject({ + cluster_id: localClusterId.value, + })) }, }); diff --git a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Row.vue b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Row.vue index f9cd630230..0eb2917bbb 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Row.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-backup/pages/page1/components/RenderData/Row.vue @@ -30,6 +30,7 @@ @@ -66,6 +67,7 @@ interface Emits { (e: 'add', params: Array): void; (e: 'remove'): void; + (e: 'clone', value: IDataRow): void; (e: 'inputClusterFinish', value: IDataRow): void; } @@ -117,14 +119,32 @@ emits('remove'); }; - defineExpose({ - getValue() { - return Promise.all([clusterRef.value.getValue(), backupLocalRef.value.getValue()]).then( - ([clusterData, backupLocalData]) => ({ - ...clusterData, - ...backupLocalData, + const handleClone = () => { + Promise.allSettled(getRowData()).then((rowData) => { + const [clusterData, backupLocalData] = rowData.map((item) => + item.status === 'fulfilled' ? item.value : item.reason, + ); + emits( + 'clone', + createRowData({ + clusterData: { + id: clusterData.cluster_id, + domain: '', + }, + backupLocal: backupLocalData.backup_local, }), ); + }); + }; + + const getRowData = () => [clusterRef.value.getValue(), backupLocalRef.value.getValue()]; + + defineExpose({ + getValue() { + return Promise.all(getRowData()).then(([clusterData, backupLocalData]) => ({ + ...clusterData, + ...backupLocalData, + })); }, }); diff --git a/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/Index.vue b/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/Index.vue index 7986e1d27c..68c655451b 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/Index.vue @@ -22,7 +22,7 @@ " />
-
+
+ >([createRowData({})]); - const selectedClusters = shallowRef<{[key: string]: Array}>({ [ClusterTypes.TENDBCLUSTER]: [] }); + const remark = ref(''); + + const selectedClusters = shallowRef<{ [key: string]: Array }>({ [ClusterTypes.TENDBCLUSTER]: [] }); // 集群域名是否已存在表格的映射表 let domainMemo: Record = {}; @@ -124,13 +140,13 @@ isShowBatchSelector.value = true; }; - const handleBatchEditTruncateType = (value: string) => { - if (!value) { + const handleBatchEditColumn = (value: string | string[], filed: IDataRowBatchKey) => { + if (!value || checkListEmpty(tableData.value)) { return; } tableData.value.forEach((row) => { Object.assign(row, { - truncateDataType: value, + [filed]: value, }); }); }; @@ -181,13 +197,32 @@ } }; + // 复制行数据 + const handleClone = (index: number, sourceData: IDataRow) => { + const dataList = [...tableData.value]; + dataList.splice( + index + 1, + 0, + Object.assign(sourceData, { + clusterData: { + ...sourceData.clusterData, + domain: tableData.value[index].clusterData?.domain ?? '', + }, + }), + ); + tableData.value = dataList; + setTimeout(() => { + rowRefs.value[rowRefs.value.length - 1].getValue(); + }); + }; + const handleSubmit = () => { isSubmitting.value = true; Promise.all(rowRefs.value.map((item: { getValue: () => Promise }) => item.getValue())) .then((data) => createTicket({ - ticket_type: 'TENDBCLUSTER_TRUNCATE_DATABASE', - remark: '', + ticket_type: TicketTypes.TENDBCLUSTER_TRUNCATE_DATABASE, + remark: remark.value, details: { infos: data, }, @@ -212,6 +247,7 @@ }; const handleReset = () => { + remark.value = ''; tableData.value = [createRowData()]; selectedClusters.value[ClusterTypes.TENDBCLUSTER] = []; domainMemo = {}; diff --git a/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/components/RenderData/Index.vue b/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/components/RenderData/Index.vue index 31ea76fa7e..056652a7ec 100644 --- a/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/components/RenderData/Index.vue +++ b/dbm-ui/frontend/src/views/spider-manage/db-clear/pages/page1/components/RenderData/Index.vue @@ -32,14 +32,14 @@ :width="220"> @@ -81,10 +82,10 @@ clusterData: clusterData ? 
clusterData : { - id: 0, - domain: '', - type: '', - }, + id: 0, + domain: '', + type: '', + }, }); diff --git a/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/RenderCluster.vue b/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/RenderCluster.vue index 5ab8e52a2d..6959cd5fcb 100644 --- a/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/RenderCluster.vue +++ b/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/RenderCluster.vue @@ -159,6 +159,9 @@ .getValue() .then(() => ({ cluster_id: localClusterId.value, + })) + .catch(() => Promise.reject({ + cluster_id: localClusterId.value, })); }, }); diff --git a/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/Row.vue b/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/Row.vue index 3c01d9a8c4..d7b9731f25 100644 --- a/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/Row.vue +++ b/dbm-ui/frontend/src/views/mysql/db-backup/pages/page1/components/RenderData/Row.vue @@ -30,6 +30,7 @@ @@ -66,6 +67,7 @@ interface Emits { (e: 'add', params: Array): void; (e: 'remove'): void; + (e: 'clone', value: IDataRow): void; (e: 'inputClusterFinish', value: IDataRow): void; } @@ -117,14 +119,27 @@ emits('remove'); }; - defineExpose({ - getValue() { - return Promise.all([clusterRef.value.getValue(), backupLocalRef.value.getValue()]).then( - ([clusterData, backupLocalData]) => ({ - ...clusterData, - ...backupLocalData, + const getRowData = () => [clusterRef.value.getValue(), backupLocalRef.value.getValue()]; + + const handleClone = () => { + Promise.allSettled(getRowData()).then((rowData) => { + const rowInfo = rowData.map((item) => (item.status === 'fulfilled' ? item.value : item.reason)); + emits( + 'clone', + createRowData({ + clusterData: props.data.clusterData, + backupLocal: rowInfo[1].backup_local, }), ); + }); + }; + + defineExpose({ + getValue() { + return Promise.all(getRowData()).then(([clusterData, backupLocalData]) => ({ + ...clusterData, + ...backupLocalData, + })); }, }); diff --git a/dbm-ui/frontend/src/views/mysql/db-clear/index.vue b/dbm-ui/frontend/src/views/mysql/db-clear/index.vue index 783a389167..f2ea0b1d0c 100644 --- a/dbm-ui/frontend/src/views/mysql/db-clear/index.vue +++ b/dbm-ui/frontend/src/views/mysql/db-clear/index.vue @@ -29,37 +29,40 @@ - -
- - - {{ t('批量录入') }} - +
+ +
+ + + {{ t('批量录入') }} + +
+ + + + {{ t('安全模式') }} + + +
- - - - {{ t('安全模式') }} - - + -
+ + + + + +