diff --git a/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go b/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go index 9e979018df..91b20ab145 100644 --- a/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go +++ b/dbm-services/common/db-resource/internal/controller/manage/rs_operation_info.go @@ -13,7 +13,6 @@ package manage import ( "encoding/json" "fmt" - "strings" "time" "dbm-services/common/db-resource/internal/model" @@ -22,6 +21,7 @@ import ( "dbm-services/common/go-pubpkg/logger" "github.com/gin-gonic/gin" + "github.com/samber/lo" "gorm.io/gorm" ) @@ -95,11 +95,10 @@ func (p GetOperationInfoParam) query(db *gorm.DB) { if cmutil.IsNotEmpty(p.BeginTime) { db.Where("create_time >= ? ", p.BeginTime) } - switch strings.ToLower(strings.TrimSpace(p.Orderby)) { - case "asc": - db.Order("create_time asc") - default: + if lo.IsEmpty(p.Orderby) { db.Order("create_time desc") + } else { + db.Order(p.Orderby) } } diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json index c2b84f2544..778d5031d4 100644 --- a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json +++ b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.json @@ -990,6 +990,12 @@ "description": "repl user, 环境变量 GENERAL_ACCOUNT_repl_user", "type": "string" }, + "tbinlogdumper_admin_pwd": { + "type": "string" + }, + "tbinlogdumper_admin_user": { + "type": "string" + }, "tdbctl_pwd": { "type": "string" }, @@ -1607,6 +1613,9 @@ "json", "dump" ] + }, + "time_layout": { + "type": "string" } } }, @@ -1966,6 +1975,9 @@ "general": { "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam" }, + "mySQLConfigParams": { + "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams" + }, "timeZone": { "type": "string" } @@ -1976,7 +1988,6 @@ "required": [ "charset", "host", - "mycnf_configs", "mysql_version", "pkg", "pkg_md5", @@ -2003,7 +2014,7 @@ "description": "安装实例的内存大小,可以不指定,会自动计算", "type": "integer" }, - "mycnf_configs": { + "myCnfConfigs": { "description": "map[port]my.cnf", "type": "array", "items": { @@ -2180,6 +2191,9 @@ "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj": { "type": "object", "properties": { + "backup_consistent_time": { + "type": "string" + }, "backup_dir": { "description": "备份所在目录", "type": "string" }, @@ -2229,6 +2243,20 @@ } } }, + "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams": { + "type": "object", + "required": [ + "mycnf_configs" + ], + "properties": { + "mycnf_configs": { + "type": "array", + "items": { + "type": "integer" + } + } + } + }, "dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp": { "type": "object", "properties": { @@ -2701,7 +2729,7 @@ "type": "boolean" }, "idempotent_mode": { - "description": "是否开启幂等模式, mysql --slave-exec-mode=idempotent or mysqlbinlog --idempotent", + "description": "是否开启幂等模式, mysqlbinlog --idempotent(\u003e=5.7)", "type": "boolean" }, "mysql_client_opt": { @@ -2730,7 +2758,7 @@ "type": "integer" }, "start_time": { - "description": "--start-datetime", + "description": "--start-datetime 时间格式\n格式 \"2006-01-02 15:04:05\" 原样传递给 mysqlbinlog\n格式\"2006-01-02T15:04:05Z07:00\"(示例\"2023-12-11T05:03:05+08:00\")按照机器本地时间,解析成 \"2006-01-02 15:04:05\" 再传递给 mysqlbinlog\n在 Init 时会统一把时间字符串转换成 time.RFC3339", "type": "string" }, "stop_pos": { "description": "--stop-position", "type": "integer"
}, "stop_time": { - "description": "--stop-datetime", + "description": "--stop-datetime 时间格式同 StartTime,可带时区,会转换成机器本地时间", "type": "string" }, "tables": { @@ -2792,7 +2820,7 @@ } }, "binlog_start_file": { - "description": "指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time", + "description": "指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time\nBinlogStartFile 只能由外部传入,不要内部修改", "type": "string" }, "parse_concurrency": { @@ -3028,7 +3056,7 @@ "type": "string" }, "target_time": { - "description": "闪回的目标时间点,对应 recover-binlog 的 start_time, 精确到秒。目标实例的时区", + "description": "闪回的目标时间点,对应 recover-binlog 的 start_time, 精确到秒。目标实例的时区\n可接受格式 ''", "type": "string" }, "tgt_instance": { @@ -3100,10 +3128,6 @@ }, "dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.CheckObject": { "type": "object", - "required": [ - "dbname", - "tables" - ], "properties": { "dbname": { "type": "string" @@ -3154,6 +3178,12 @@ "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InstanceTuple" } }, + "not_flush_all": { + "type": "boolean" + }, + "only_init_ctl": { + "type": "boolean" + }, "port": { "type": "integer", "minimum": 3306 @@ -3164,6 +3194,12 @@ "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance" } }, + "spider_slave_instances": { + "type": "array", + "items": { + "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance" + } + }, "tdbctl_pass": { "type": "string" }, @@ -3228,11 +3264,14 @@ "dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.TableSchemaCheckParam": { "type": "object", "required": [ - "check_objects", "host", "port" ], "properties": { + "check_all": { + "description": "检查所有非系统库表", + "type": "boolean" + }, "check_objects": { "type": "array", "items": { @@ -3242,6 +3281,9 @@ "host": { "type": "string" }, + "inconsistency_throws_err": { + "type": "boolean" + }, "port": { "type": "integer", "minimum": 3306 @@ -3353,6 +3395,30 @@ }, "dumper_id": { "type": "string" + }, + "kafka_pwd": { + "type": "string" + }, + "kafka_user": { + "type": "string" + }, + "l5_cmdid": { + "type": "integer" + }, + "l5_modid": { + "type": "integer" + }, + "protocol_type": { + "type": "string" + }, + "server_id": { + "type": "integer" + }, + "target_address": { + "type": "string" + }, + "target_port": { + "type": "integer" } } }, @@ -3371,6 +3437,9 @@ "general": { "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam" }, + "mySQLConfigParams": { + "$ref": "#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams" + }, "renderConfigs": { "type": "object", "additionalProperties": { diff --git a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml index a3a9cefc98..8741976ba6 100644 --- a/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml +++ b/dbm-services/mysql/db-tools/dbactuator/docs/swagger.yaml @@ -102,6 +102,10 @@ definitions: repl_user: description: repl user, 环境变量 GENERAL_ACCOUNT_repl_user type: string + tbinlogdumper_admin_pwd: + type: string + tbinlogdumper_admin_user: + type: string tdbctl_pwd: type: string tdbctl_user: @@ -533,6 +537,8 @@ definitions: - json - dump type: string + time_layout: + type: string required: - binlog_dir - binlog_files @@ -785,6 +791,8 @@ definitions: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.InstallMySQLParams' general: $ref: 
'#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam' + mySQLConfigParams: + $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams' timeZone: type: string type: object @@ -804,7 +812,7 @@ definitions: inst_mem: description: 安装实例的内存大小,可以不指定,会自动计算 type: integer - mycnf_configs: + myCnfConfigs: description: map[port]my.cnf items: type: integer type: array @@ -832,7 +840,6 @@ definitions: required: - charset - host - - mycnf_configs - mysql_version - pkg - pkg_md5 @@ -936,6 +943,8 @@ definitions: type: object dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.LocalBackupObj: properties: + backup_consistent_time: + type: string backup_dir: description: 备份所在目录 type: string @@ -972,6 +981,15 @@ definitions: description: 备份所属 port type: integer type: object + dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams: + properties: + mycnf_configs: + items: + type: integer + type: array + required: + - mycnf_configs + type: object dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MycnfChangeComp: properties: extend: @@ -1305,7 +1323,7 @@ definitions: description: 是否启用 flashback type: boolean idempotent_mode: - description: 是否开启幂等模式, mysql --slave-exec-mode=idempotent or mysqlbinlog --idempotent + description: 是否开启幂等模式, mysqlbinlog --idempotent(>=5.7) type: boolean mysql_client_opt: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql_restore.MySQLClientOpt' @@ -1330,13 +1348,17 @@ definitions: description: --start-position type: integer start_time: - description: --start-datetime + description: |- + --start-datetime 时间格式 + 格式 "2006-01-02 15:04:05" 原样传递给 mysqlbinlog + 格式"2006-01-02T15:04:05Z07:00"(示例"2023-12-11T05:03:05+08:00")按照机器本地时间,解析成 "2006-01-02 15:04:05" 再传递给 mysqlbinlog + 在 Init 时会统一把时间字符串转换成 time.RFC3339 type: string stop_pos: description: --stop-position type: integer stop_time: - description: --stop-datetime + description: --stop-datetime 时间格式同 StartTime,可带时区,会转换成机器本地时间 type: string tables: description: row event 解析指定 tables @@ -1369,7 +1391,9 @@ definitions: type: string type: array binlog_start_file: - description: 指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time + description: |- + 指定要开始应用的第 1 个 binlog。如果指定,一般要设置 start_pos,如果不指定则使用 start_time + BinlogStartFile 只能由外部传入,不要内部修改 type: string parse_concurrency: description: 解析的并发度,默认 1 @@ -1536,7 +1560,9 @@ definitions: stop_time: type: string target_time: - description: 闪回的目标时间点,对应 recover-binlog 的 start_time, 精确到秒。目标实例的时区 + description: |- + 闪回的目标时间点,对应 recover-binlog 的 start_time, 精确到秒。目标实例的时区 + 可接受格式 '' type: string tgt_instance: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_native.InsObject' @@ -1597,9 +1623,6 @@ definitions: items: type: string type: array - required: - - dbname - - tables type: object dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InitClusterRoutingComp: properties: @@ -1620,6 +1643,10 @@ definitions: items: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.InstanceTuple' type: array + not_flush_all: + type: boolean + only_init_ctl: + type: boolean port: minimum: 3306 type: integer spider_instances: items: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance' type: array + spider_slave_instances: + items: + $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.Instance' + type: array tdbctl_pass: type: string
tdbctl_user: @@ -1678,17 +1709,21 @@ definitions: type: object dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.TableSchemaCheckParam: properties: + check_all: + description: 检查所有非系统库表 + type: boolean check_objects: items: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_spiderctl.CheckObject' type: array host: type: string + inconsistency_throws_err: + type: boolean port: minimum: 3306 type: integer required: - - check_objects - host - port type: object @@ -1765,6 +1800,22 @@ definitions: type: string dumper_id: type: string + kafka_pwd: + type: string + kafka_user: + type: string + l5_cmdid: + type: integer + l5_modid: + type: integer + protocol_type: + type: string + server_id: + type: integer + target_address: + type: string + target_port: + type: integer type: object dbm-services_mysql_db-tools_dbactuator_pkg_components_tbinlogdumper.InstallTbinlogDumperComp: properties: @@ -1776,6 +1827,8 @@ definitions: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_tbinlogdumper.Configs' general: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components.GeneralParam' + mySQLConfigParams: + $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_mysql.MySQLConfigParams' renderConfigs: additionalProperties: $ref: '#/definitions/dbm-services_mysql_db-tools_dbactuator_pkg_components_tbinlogdumper.renderDumperConfigs' diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_dump.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_dump.go new file mode 100644 index 0000000000..93931dfe96 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_dump.go @@ -0,0 +1,99 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// MysqlDataMigrateDumpAct TODO +type MysqlDataMigrateDumpAct struct { + *subcmd.BaseOptions + // 与开区大部分一样,可以复用开区的代码,只在导出库的时候不一样 + Service mysql.OpenAreaDumpSchemaComp +} + +// NewMysqlDataMigrateDumpCommand TODO +func NewMysqlDataMigrateDumpCommand() *cobra.Command { + // *subcmd.BaseOptions是指针变量,需要初始化, subcmd.GBaseOptions在subcmd的init中已被初始化 + act := MysqlDataMigrateDumpAct{ + BaseOptions: subcmd.GBaseOptions, + } + + cmd := &cobra.Command{ + Use: "mysql_data_migrate_dump", + Short: "mysql数据迁移导出库", + Example: fmt.Sprintf( + `dbactuator mysql mysql_data_migrate_dump %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *MysqlDataMigrateDumpAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *MysqlDataMigrateDumpAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + // d.Deserialize 执行后,Service.Params 已完成赋值,这里再补充全局运行时参数 + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return nil +} + +// Run TODO +func (d *MysqlDataMigrateDumpAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "运行导出库", + Func: d.Service.MysqlDataMigrate, + }, + { + FunName: "压缩开区文件", + Func: d.Service.CompressDumpDir, + }, + { + FunName: "上传库文件", + Func: d.Service.Upload, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("mysql数据迁移导出库成功") + return nil +}
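The dump act above delegates to OpenAreaDumpSchemaComp, whose per-schema parameter gains a db_list field later in this diff (open_area_dump_schema.go). As a rough sketch of the payload shape the new subcommand consumes — the json tags schema/tables/db_list come from the OneOpenAreaSchema struct, while the surrounding envelope and the concrete values are illustrative assumptions only:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirror of OneOpenAreaSchema, for illustration only; the json tags
// follow the struct definition in open_area_dump_schema.go.
type oneOpenAreaSchema struct {
	Schema string   `json:"schema,omitempty"`  // open-area path: the schema to dump
	Tables []string `json:"tables,omitempty"`  // open-area path: tables inside that schema
	DbList []string `json:"db_list,omitempty"` // data-migration path: whole databases to dump
}

func main() {
	// Data migration fills db_list only; the open-area path fills schema+tables.
	payload := []oneOpenAreaSchema{{DbList: []string{"db1", "db2"}}}
	out, _ := json.MarshalIndent(payload, "", "  ")
	fmt.Println(string(out))
}
```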
diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_import.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_import.go new file mode 100644 index 0000000000..dae65bd418 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysql_data_migrate_import.go @@ -0,0 +1,99 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// MysqlDataMigrateImportAct TODO +type MysqlDataMigrateImportAct struct { + *subcmd.BaseOptions + Service mysql.OpenAreaImportSchemaComp +} + +// NewMysqlDataMigrateImportCommand TODO +func NewMysqlDataMigrateImportCommand() *cobra.Command { + act := MysqlDataMigrateImportAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "mysql_data_migrate_import", + Short: "mysql数据迁移导入库", + Example: fmt.Sprintf( + `dbactuator mysql mysql_data_migrate_import %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *MysqlDataMigrateImportAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *MysqlDataMigrateImportAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return +} + +// Run TODO +func (d *MysqlDataMigrateImportAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "解压schema文件", + Func: d.Service.DecompressDumpDir, + }, + { + FunName: "创建库", + Func: d.Service.CreateDatabase, + }, + { + FunName: "导入库文件", + Func: d.Service.MysqlDataMigrateImport, + }, + { + FunName: "清除dump目录", + Func: d.Service.CleanDumpDir, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("mysql数据迁移导入库成功") + return +} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go index 9dee772232..0f3c387d2b 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go @@ -67,6 +67,8 @@ func NewMysqlCommand() *cobra.Command { OSInfoGetCommand(), NewStandardizeMySQLCommand(), NewStandardizeProxyCommand(), + NewMysqlDataMigrateDumpCommand(), + NewMysqlDataMigrateImportCommand(), }, }, { diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go index 305e9ff4fc..1a909639d4 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/execute_partition_sql.go @@ -41,6 +41,8 @@ type ExcutePartitionSQLComp struct { // ExcutePartitionSQLParam TODO type ExcutePartitionSQLParam struct { BkBizId int `json:"bk_biz_id"` + DbAppAbbr string `json:"db_app_abbr"` // 业务名称缩写 + BkBizName string `json:"bk_biz_name"` // 业务名称 ClusterId int `json:"cluster_id"` ImmuteDomain string `json:"immute_domain"` MasterIp string `json:"master_ip" validate:"required,ip"` // 当前实例的主机地址 @@ -63,8 +65,9 @@ type ExcutePartitionSQLObj struct { // InitPartitionContent TODO type InitPartitionContent struct { - NeedSize int64 `json:"need_size"` - Sql string `json:"sql"`
+ NeedSize int64 `json:"need_size"` + Sql string `json:"sql"` + HasUniqueKey bool `json:"has_unique_key"` } // ExcutePartitionSQLRunTimeCtx TODO @@ -176,7 +179,19 @@ func (e *ExcutePartitionSQLComp) Excute() (err error) { err = e.excuteOne(dbw, initPartition, errfile, 10) } else { // 初始化分区使用pt工具,因此通过命令行的形式进行执行 - err = e.excuteInitSql(eb.InitPartition, errfile, 10) + // 初始化分区需要区分表是否有唯一键:有唯一键的才能走pt工具 + // 没有唯一键的不能用pt工具,只能直接执行初始化SQL + + hasUnikeyInit, hasNotUnikeyInit := e.initSQLClassify(eb.InitPartition) + if len(hasUnikeyInit) > 0 { + // 有唯一键的可以使用pt工具执行 + err = e.excuteInitSql(hasUnikeyInit, errfile, 10) + } + + if len(hasNotUnikeyInit) > 0 { + err = e.excuteOne(dbw, hasNotUnikeyInit, errfile, 10) + } + + } if err != nil { lock.Lock() @@ -233,6 +248,8 @@ func (e *ExcutePartitionSQLComp) Excute() (err error) { body.Dimension["ticket"] = e.Params.Ticket body.Dimension["cluster_domain"] = e.Params.ImmuteDomain body.Dimension["shard_name"] = e.Params.ShardName + body.Dimension["db_app_abbr"] = e.Params.DbAppAbbr + body.Dimension["bk_biz_name"] = e.Params.BkBizName manager := ma.NewManager("http://127.0.0.1:9999") sendErr := manager.SendEvent(body.Name, body.Content, body.Dimension) errs = append(errs, strings.Join(errsall, ";\n")) @@ -413,3 +430,18 @@ func (e *ExcutePartitionSQLComp) getNewPartitionSQL(partitionSQLs []string) []st } return newPartitionSQLs } + +func (e *ExcutePartitionSQLComp) initSQLClassify(initPartitions []InitPartitionContent) ( + []InitPartitionContent, []string) { + var hasUnikeyInit []InitPartitionContent + var hasNotUnikeyInit []string + + for _, initPartition := range initPartitions { + if initPartition.HasUniqueKey { + hasUnikeyInit = append(hasUnikeyInit, initPartition) + } else { + hasNotUnikeyInit = append(hasNotUnikeyInit, initPartition.Sql) + } + } + return hasUnikeyInit, hasNotUnikeyInit +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go index e34c6e47a6..8bb4f1fe5a 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go @@ -53,7 +53,8 @@ type OpenAreaDumpSchemaParam struct { // OneOpenAreaSchema 用于存放一个区库表的信息 type OneOpenAreaSchema struct { Schema string `json:"schema"` // 指定dump的库 - Tbales []string `json:"tables"` + Tables []string `json:"tables"` + DbList []string `json:"db_list"` // 用于兼容mysql数据迁移 } // OpenAreaDumpSchemaRunTimeCtx TODO @@ -85,11 +86,11 @@ func (c *OpenAreaDumpSchemaComp) Example() interface{} { OpenAreaParam: []OneOpenAreaSchema{ { Schema: "data1", - Tbales: []string{"tb1", "tb2"}, + Tables: []string{"tb1", "tb2"}, }, { Schema: "data2", - Tbales: []string{"tb1", "tb2"}, + Tables: []string{"tb1", "tb2"}, }, }, }, @@ -172,7 +173,7 @@ func (c *OpenAreaDumpSchemaComp) OpenAreaDumpSchema() (err error) { var dumper mysqlutil.Dumper outputfileName := fmt.Sprintf("%s.sql", oneOpenAreaSchema.Schema) schema := fmt.Sprintf("%s %s", - oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tbales, " "), + oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tables, " "), ) // 导出表结构,同时导出存储过程、触发器、event dumper = &mysqlutil.MySQLDumperTogether{ @@ -210,13 +211,13 @@ func (c *OpenAreaDumpSchemaComp) OpenAreaDumpData() (err error) { for _, oneOpenAreaSchema := range c.Params.OpenAreaParam { var dumper
mysqlutil.Dumper - if len(oneOpenAreaSchema.Tbales) == 0 { + if len(oneOpenAreaSchema.Tables) == 0 { continue } outputfileName := fmt.Sprintf("%s.sql", oneOpenAreaSchema.Schema) schema := fmt.Sprintf("%s %s", - oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tbales, " "), + oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tables, " "), ) dumper = &mysqlutil.MySQLDumperTogether{ @@ -249,6 +250,46 @@ func (c *OpenAreaDumpSchemaComp) OpenAreaDumpData() (err error) { return nil } +// MysqlDataMigrate 用于mysql数据迁移,需要导出库表结构和数据 +func (c *OpenAreaDumpSchemaComp) MysqlDataMigrate() (err error) { + for _, oneOpenAreaSchema := range c.Params.OpenAreaParam { + for _, db := range oneOpenAreaSchema.DbList { + var dumper mysqlutil.Dumper + // schema := strings.Join(oneOpenAreaSchema.DbList, " ") + outputfileName := fmt.Sprintf("%s.sql", db) + + // 导出库,同时导出存储过程、触发器、event + dumper = &mysqlutil.MySQLDumperTogether{ + MySQLDumper: mysqlutil.MySQLDumper{ + DumpDir: c.dumpDirPath, + Ip: c.Params.Host, + Port: c.Params.Port, + DbBackupUser: c.GeneralParam.RuntimeAccountParam.AdminUser, + DbBackupPwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + DbNames: []string{db}, + DumpCmdFile: c.dumpCmd, + Charset: c.charset, + MySQLDumpOption: mysqlutil.MySQLDumpOption{ + DumpRoutine: true, + DumpTrigger: true, + DumpEvent: true, + NeedUseDb: true, + GtidPurgedOff: c.GtidPurgedOff, + }, + }, + OutputfileName: outputfileName, + } + if err := dumper.Dump(); err != nil { + logger.Error("dump failed: ", err.Error()) + return err + } + } + + } + + return nil +} + // CompressDumpDir TODO func (c *OpenAreaDumpSchemaComp) CompressDumpDir() (err error) { // // 如果不上传制品库,则不用压缩 diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go index 3e1cd117a9..7e48d53e5e 100644 --- a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go @@ -48,8 +48,9 @@ type OpenAreaImportSchemaParam struct { // OneOpenAreaImportSchema TODO type OneOpenAreaImportSchema struct { - Schema string `json:"schema"` // 指定dump的库 - NewDB string `json:"newdb"` + Schema string `json:"schema"` // 指定dump的库 + NewDB string `json:"newdb"` + DbList []string `json:"db_list"` } // OpenAreaImportSchemaRunTimeCtx TODO @@ -209,6 +210,24 @@ func (c *OpenAreaImportSchemaComp) CreateNewDatabase() (err error) { return } +// CreateDatabase TODO +func (c *OpenAreaImportSchemaComp) CreateDatabase() (err error) { + // 导出文件有create database语句,这里先创建一次,重复的话报错终止 + for _, oneShemaInfo := range c.Params.OpenAreaParam { + for _, db := range oneShemaInfo.DbList { + createDBSql := fmt.Sprintf("create database `%s` charset %s;", + db, c.charset) + _, err := c.conn.Exec(createDBSql) + if err != nil { + logger.Error("create db %s got an error:%s", db, err.Error()) + return err + } + } + + } + return +} + // OpenAreaImportSchema TODO func (c *OpenAreaImportSchemaComp) OpenAreaImportSchema() (err error) { for _, oneShemaInfo := range c.Params.OpenAreaParam { @@ -255,6 +274,32 @@ func (c *OpenAreaImportSchemaComp) OpenAreaImportData() (err error) { return nil } +// MysqlDataMigrateImport TODO +func (c *OpenAreaImportSchemaComp) MysqlDataMigrateImport() (err error) { + for _, oneShemaInfo := range c.Params.OpenAreaParam { + for _, db := range oneShemaInfo.DbList { + dataFileName := fmt.Sprintf("%s.sql", db) + err = 
mysqlutil.ExecuteSqlAtLocal{ + IsForce: false, + Charset: c.charset, + NeedShowWarnings: false, + Host: c.Params.Host, + Port: c.Params.Port, + Socket: c.socket, + WorkDir: c.dumpDir, + User: c.GeneralParam.RuntimeAccountParam.AdminUser, + Password: c.GeneralParam.RuntimeAccountParam.AdminPwd, + }.ExcuteSqlByMySQLClientOne(dataFileName, db) + if err != nil { + logger.Error("执行%s文件失败!", dataFileName) + return err + } + } + + } + return nil +} + // CleanDumpDir TODO func (c *OpenAreaImportSchemaComp) CleanDumpDir() (err error) { return diff --git a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/cluster_forget.go b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/cluster_forget.go index 99c898b18c..ecf518fa26 100644 --- a/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/cluster_forget.go +++ b/dbm-services/redis/db-tools/dbactuator/pkg/atomjobs/atomredis/cluster_forget.go @@ -3,6 +3,7 @@ package atomredis import ( "encoding/json" "fmt" + "strings" "time" "dbm-services/redis/db-tools/dbactuator/models/myredis" @@ -189,8 +190,9 @@ func (job *RedisClusterForget) clusterForgetNode( defer nodeConn.Close() if err := nodeConn.ClusterForget(fnode.NodeID); err != nil { - job.runtime.Logger.Error("forget node %s:%s failed :+%v", fnode.Addr, fnode.NodeID, err) - if ignoreErr { + // (error) ERR:18,msg:forget node unkown:传了不存在的NodeID时,节点表中找不到指定的节点标识;注意这里和官方版本返回的错误信息不一致 + job.runtime.Logger.Error("forget node %s:%s failed :+%v [just Ignore::%+v]", fnode.Addr, fnode.NodeID, err, fnode) + if ignoreErr && (strings.Contains(err.Error(), "Unknown node") || strings.Contains(err.Error(), "node unkown")) { job.runtime.Logger.Warn("current node status maybe fail, ignore %s:%+v", node.Addr, err) continue } diff --git a/dbm-ui/backend/components/mysql_priv_manager/client.py b/dbm-ui/backend/components/mysql_priv_manager/client.py index e3fb458526..cd7c8caaf7 100644 --- a/dbm-ui/backend/components/mysql_priv_manager/client.py +++ b/dbm-ui/backend/components/mysql_priv_manager/client.py @@ -31,6 +31,11 @@ def __init__(self): url="/priv/get_account_rule_list", description=_("帐号规则清单"), ) + self.pre_check_add_account_rule = self.generate_data_api( + method="POST", + url="/priv/add_account_rule_dry_run", + description=_("添加帐号规则前置检查"), + ) self.add_account_rule = self.generate_data_api( method="POST", url="/priv/add_account_rule",
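The dry-run endpoint registered above is consumed by MySQLAccountHandler.pre_check_add_account_rule further down in this diff, which posts bk_biz_id/operator/cluster_type/account_id/priv/dbname. A hedged sketch of an equivalent direct call — the service base URL and the value shapes are assumptions, and DBM itself goes through the DBPrivManagerApi wrapper rather than raw HTTP:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Parameter names mirror account_rule_params in pre_check_add_account_rule;
	// the concrete values (and the priv shape) are placeholders.
	body, _ := json.Marshal(map[string]interface{}{
		"bk_biz_id":    3,
		"operator":     "admin",
		"cluster_type": "mysql",
		"account_id":   1,
		"priv":         map[string]string{"dml": "select,update"}, // assumed shape
		"dbname":       "datadb%",
	})
	// Assumed service address; only the URL path comes from the diff above.
	resp, err := http.Post("http://bk-dbpriv/priv/add_account_rule_dry_run",
		"application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	raw, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(raw))
}
```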
diff --git a/dbm-ui/backend/configuration/handlers/password.py b/dbm-ui/backend/configuration/handlers/password.py index 4bda9c45fa..f962fe13ee 100644 --- a/dbm-ui/backend/configuration/handlers/password.py +++ b/dbm-ui/backend/configuration/handlers/password.py @@ -27,6 +27,7 @@ from backend.core.encrypt.constants import AsymmetricCipherConfigType from backend.core.encrypt.handlers import AsymmetricHandler from backend.db_meta.enums import ClusterType, InstanceInnerRole, InstanceRole, TenDBClusterSpiderRole +from backend.db_meta.models import Machine from backend.db_periodic_task.models import DBPeriodicTask from backend.db_services.ipchooser.query.resource import ResourceQueryHelper from backend.utils.string import base64_decode, base64_encode @@ -72,12 +73,19 @@ def verify_password_strength(cls, password: str, echo: bool = False): @classmethod def query_mysql_admin_password( - cls, limit: int, offset: int, instances: List[str] = None, begin_time: str = None, end_time: str = None + cls, + limit: int, + offset: int, + bk_biz_id: int = None, + instances: List[str] = None, + begin_time: str = None, + end_time: str = None, ): """ 获取mysql的admin密码 @param limit: 分页限制 @param offset: 分页起始 + @param bk_biz_id: 业务ID @param instances: 实例列表 @param begin_time: 过滤开始时间 @param end_time: 过滤结束时间 @@ -85,12 +93,18 @@ def query_mysql_admin_password( instances = instances or [] # 获取过滤条件 instance_list = [] - try: - for address in instances: + for address in instances: + split_len = len(address.split(":")) + if split_len == 2: + # 输入ip:port + ip, port = address.split(":") + instance_list.append({"ip": ip, "port": int(port)}) + elif split_len == 3: + # 输入bk_cloud_id:ip:port bk_cloud_id, ip, port = address.split(":") - instance_list.append({"ip": ip, "port": int(port), "bk_cloud_id": int(bk_cloud_id)}) - except (IndexError, ValueError): - raise PasswordPolicyBaseException(_("请保证查询的实例输入格式合法,格式为[云区域:IP:PORT]")) + instance_list.append({"ip": ip, "port": int(port), "bk_cloud_id": int(bk_cloud_id)}) + else: + raise PasswordPolicyBaseException(_("请保证查询的实例输入格式合法,格式为[CLOUD_ID:]IP:PORT")) filters = {"limit": limit, "offset": offset, "component": DBType.MySQL.value, "username": MYSQL_ADMIN_USER} if instance_list: @@ -99,6 +113,8 @@ filters.update(begin_time=begin_time) if end_time: filters.update(end_time=end_time) + if bk_biz_id: + filters.update(bk_biz_id=bk_biz_id) # 获取密码生效实例结果 mysql_admin_password_data = DBPrivManagerApi.get_mysql_admin_password(params=filters) @@ -119,7 +135,9 @@ def modify_admin_password(cls, operator: str, password: str, lock_hour: int, ins @param lock_hour: 锁定时长 @param instance_list: 修改的实例列表 """ - + # 获取业务信息,任取一台machine查询 + machine = Machine.objects.get(bk_cloud_id=instance_list[0]["bk_cloud_id"], ip=instance_list[0]["ip"]) + bk_biz_id = machine.bk_biz_id # 根据cluster_type, bk_cloud_id, role将实例分类后聚合 aggregate_instance: Dict[str, Dict[str, Dict[str, List]]] = defaultdict( lambda: defaultdict(lambda: defaultdict(list)) ) @@ -134,7 +152,12 @@ for cluster_type, role_instances in clusters.items(): instances_info = [{"role": role, "addresses": insts} for role, insts in role_instances.items()] cluster_infos.append( - {"bk_cloud_id": bk_cloud_id, "cluster_type": cluster_type, "instances": instances_info} + { + "bk_cloud_id": bk_cloud_id, + "cluster_type": cluster_type, + "instances": instances_info, + "bk_biz_id": bk_biz_id, + } ) # 根据cluster info获取DB类型,这里保证修改的实例属于同一组件 diff --git a/dbm-ui/backend/configuration/serializers.py b/dbm-ui/backend/configuration/serializers.py index f1870b72c8..5947fcc15a 100644 --- a/dbm-ui/backend/configuration/serializers.py +++ b/dbm-ui/backend/configuration/serializers.py @@ -93,6 +93,7 @@ class GetMySQLAdminPasswordSerializer(serializers.Serializer): limit = serializers.IntegerField(help_text=_("分页限制"), required=False, default=10) offset = serializers.IntegerField(help_text=_("分页起始"), required=False, default=0) + bk_biz_id = serializers.IntegerField(help_text=_("业务ID"), required=False) begin_time = DBTimezoneField(help_text=_("开始时间"), required=False) end_time = DBTimezoneField(help_text=_("结束时间"), required=False) instances = serializers.CharField(help_text=_("过滤的实例列表(通过,分割,实例格式为--cloud:ip:port)"), required=False) diff --git a/dbm-ui/backend/core/storages/handlers.py b/dbm-ui/backend/core/storages/handlers.py index b2f5da5beb..2c1004e27c 100644 --- a/dbm-ui/backend/core/storages/handlers.py +++ b/dbm-ui/backend/core/storages/handlers.py @@ -16,6 +16,7 @@ from bkstorages.exceptions import RequestError as BKStorageError from rest_framework.status import HTTP_200_OK +from backend import env from
backend.core.storages.storage import CustomBKRepoStorage, get_storage from backend.exceptions import ApiRequestError, ApiResultError @@ -79,3 +80,20 @@ def delete_file(self, file_path) -> bool: raise ApiRequestError(e) return True + + def create_bkrepo_access_token(self, path: str): + """ + 获取制品库临时凭证,并返回制品库相关信息 + :param path: 授权路径 + """ + # 过期时间默认一天,且限制访问1次 + expire_time = 3600 * 24 + permits = 1 + data = self.storage.client.create_bkrepo_access_token(paths=[path], expire_time=expire_time, permits=permits) + return { + "token": data[0]["token"], + "url": env.BKREPO_ENDPOINT_URL, + "project": env.BKREPO_PROJECT, + "repo": env.BKREPO_BUCKET, + "path": path, + } diff --git a/dbm-ui/backend/core/storages/serializers.py b/dbm-ui/backend/core/storages/serializers.py index 93b3b0bcfa..dfdbb8bbf4 100644 --- a/dbm-ui/backend/core/storages/serializers.py +++ b/dbm-ui/backend/core/storages/serializers.py @@ -19,3 +19,7 @@ class BatchDownloadFileSerializer(serializers.Serializer): class FileSerializer(serializers.Serializer): file_path = serializers.CharField(help_text=_("文件路径")) + + +class CreateTokenSerializer(serializers.Serializer): + file_path = serializers.CharField(help_text=_("文件路径")) diff --git a/dbm-ui/backend/core/storages/storage.py b/dbm-ui/backend/core/storages/storage.py index d122712438..390db4232b 100644 --- a/dbm-ui/backend/core/storages/storage.py +++ b/dbm-ui/backend/core/storages/storage.py @@ -82,6 +82,26 @@ def batch_download(self, paths: List[str]) -> requests.Response: logger.info("Calling BkRepo: %s", curlify.to_curl(resp.request)) return resp + def create_bkrepo_access_token(self, paths: List[str], expire_time: int, permits: int) -> dict: + """ + 返回制品库临时凭证 + """ + client = self.get_client() + url = urljoin(self.endpoint_url, "/generic/temporary/token/create") + data = { + "projectId": self.project, + "repoName": self.bucket, + "fullPathSet": paths, + "expireSeconds": expire_time, + "permits": permits, + "type": "ALL", + } + resp = client.post(url, json=data, timeout=TIMEOUT_THRESHOLD) + if not resp.ok: + logger.error("The request was sent, but the server rejected the temporary token creation.
") + + return self._validate_resp(resp) + @deconstructible class CustomBKRepoStorage(BaseStorage, bkrepo.BKRepoStorage): diff --git a/dbm-ui/backend/core/storages/views.py b/dbm-ui/backend/core/storages/views.py index c444fbf91f..4cdfc69666 100644 --- a/dbm-ui/backend/core/storages/views.py +++ b/dbm-ui/backend/core/storages/views.py @@ -15,7 +15,7 @@ from backend.bk_web import viewsets from backend.bk_web.swagger import common_swagger_auto_schema from backend.core.storages.handlers import StorageHandler -from backend.core.storages.serializers import BatchDownloadFileSerializer, FileSerializer +from backend.core.storages.serializers import BatchDownloadFileSerializer, CreateTokenSerializer, FileSerializer SWAGGER_TAG = "storage" @@ -42,3 +42,11 @@ def fetch_file_content(self, request): def delete_file(self, request): file_path = self.params_validate(self.get_serializer_class())["file_path"] return Response(StorageHandler().delete_file(file_path=file_path)) + + @common_swagger_auto_schema( + operation_summary=_("获取临时凭证"), request_body=CreateTokenSerializer(), tags=[SWAGGER_TAG] + ) + @action(methods=["POST"], detail=False, serializer_class=CreateTokenSerializer) + def create_bkrepo_access_token(self, request): + file_path = self.params_validate(self.get_serializer_class())["file_path"] + return Response(StorageHandler().create_bkrepo_access_token(path=file_path)) diff --git a/dbm-ui/backend/db_meta/models/spec.py b/dbm-ui/backend/db_meta/models/spec.py index 67b654962a..e2b3318da5 100644 --- a/dbm-ui/backend/db_meta/models/spec.py +++ b/dbm-ui/backend/db_meta/models/spec.py @@ -81,13 +81,10 @@ def capacity(self): def _get_apply_params_detail( self, group_mark, count, bk_cloud_id, affinity=AffinityEnum.NONE.value, location_spec=None ): - # 如果没有城市信息,则自动忽略亲和性(default表示无城市信息) + # 如果没有城市信息,default表示无城市信息 if location_spec and location_spec["city"] == "default": location_spec = None - if not location_spec: - affinity = AffinityEnum.NONE.value - # 获取资源申请的detail过程,暂时忽略亲和性和位置参数过滤 spec_offset = SystemSettings.get_setting_value(SystemSettingsEnum.SPEC_OFFSET) apply_params = { diff --git a/dbm-ui/backend/db_periodic_task/local_tasks/mysql_backup/check_full_backup.py b/dbm-ui/backend/db_periodic_task/local_tasks/mysql_backup/check_full_backup.py index 35e5243784..b19f21ab7a 100644 --- a/dbm-ui/backend/db_periodic_task/local_tasks/mysql_backup/check_full_backup.py +++ b/dbm-ui/backend/db_periodic_task/local_tasks/mysql_backup/check_full_backup.py @@ -72,12 +72,13 @@ def _build_backup_info_files(backups_info: []): backups[bid].is_full_backup = i.get("is_full_backup") for f in i.get("file_list", []): - bf = BackupFile(f.get("file_name"), f.get("file_size"), f.get("file_type"), f.get("task_id")) - if f.get("file_type") == "index": + file_type = f.get("file_type") + bf = BackupFile(f.get("file_name"), f.get("file_size"), file_type, f.get("task_id")) + if file_type == "index": backups[bid].file_index = bf - elif f.get("file_type") == "priv": + elif file_type == "priv": backups[bid].file_priv = bf - elif f.get("file_type") == "tar": + elif file_type == "tar" or file_type == "part": backups[bid].file_tar.append(bf) else: pass diff --git a/dbm-ui/backend/db_services/cluster_entry/handlers.py b/dbm-ui/backend/db_services/cluster_entry/handlers.py new file mode 100644 index 0000000000..11daae77dc --- /dev/null +++ b/dbm-ui/backend/db_services/cluster_entry/handlers.py @@ -0,0 +1,83 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 
蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +import logging + +from backend.db_meta.enums import ClusterEntryType +from backend.db_meta.exceptions import InstanceNotExistException +from backend.db_meta.models import Cluster, ClusterEntry, ProxyInstance, StorageInstance +from backend.db_services.dbbase.resources.query import ListRetrieveResource +from backend.flow.utils.dns_manage import DnsManage + +logger = logging.getLogger("root") + + +class ClusterEntryHandler: + def __init__(self, cluster_id): + self.cluster_id = cluster_id + self.cluster = Cluster.objects.get(id=self.cluster_id) + + def refresh_cluster_domain(self, cluster_entry_details): + for detail in cluster_entry_details: + # 修改 DNS 记录 + cluster_entry_ins_map = {} + if detail["cluster_entry_type"] == ClusterEntryType.DNS: + cluster_entry = ClusterEntry.objects.get( + cluster__id=self.cluster_id, + cluster_entry_type=ClusterEntryType.DNS, + entry=detail["domain_name"], + ) + if cluster_entry.id not in cluster_entry_ins_map: + cluster_entry_ins_map[cluster_entry.id] = { + "cluster_entry": cluster_entry, + "storage_instances": [], + "proxy_instances": [], + } + # 修改实例与访问入口的绑定关系 + for target_instance in detail["target_instances"]: + ip, port = target_instance.split("#") + filter_condition = dict(machine__ip=ip, cluster=self.cluster) + storage_ins = StorageInstance.objects.filter(**filter_condition).first() + if storage_ins is None: + proxy_ins = ProxyInstance.objects.filter(**filter_condition).first() + if proxy_ins is None: + raise InstanceNotExistException(bk_cloud_id=self.cluster.bk_cloud_id, ip=ip, port=port) + else: + # 重新绑定代理实例 + cluster_entry_ins_map[cluster_entry.id]["proxy_instances"].append(proxy_ins) + else: + # 重新绑定存储实例 + cluster_entry_ins_map[cluster_entry.id]["storage_instances"].append(storage_ins) + + DnsManage(self.cluster.bk_biz_id, self.cluster.bk_cloud_id).refresh_cluster_domain( + detail["domain_name"], detail["target_instances"] + ) + + # 重新绑定实例 + for cluster_entry_info in cluster_entry_ins_map.values(): + cluster_entry = cluster_entry_info["cluster_entry"] + # 清除并重新绑定实例 + if cluster_entry_info["proxy_instances"]: + cluster_entry.proxyinstance_set.set(cluster_entry_info["proxy_instances"]) + if cluster_entry_info["storage_instances"]: + cluster_entry.storageinstance_set.set(cluster_entry_info["storage_instances"]) + + def get_cluster_entries(self, bk_biz_id, cluster_entry_type=None): + + extra = {"cluster_entry_type": cluster_entry_type} if cluster_entry_type else {} + cluster_entries = ListRetrieveResource.query_cluster_entry_details( + { + "id": self.cluster.id, + "bk_cloud_id": self.cluster.bk_cloud_id, + "bk_biz_id": bk_biz_id, + }, + **extra, + ) + return cluster_entries diff --git a/dbm-ui/backend/db_services/cluster_entry/views.py b/dbm-ui/backend/db_services/cluster_entry/views.py index 3059233203..9aa764947f 100644 --- a/dbm-ui/backend/db_services/cluster_entry/views.py +++ b/dbm-ui/backend/db_services/cluster_entry/views.py @@ 
-16,11 +16,10 @@ from backend.bk_web import viewsets from backend.bk_web.swagger import common_swagger_auto_schema -from backend.db_meta.enums import ClusterEntryType, ClusterType +from backend.db_meta.enums import ClusterType from backend.db_meta.models import Cluster +from backend.db_services.cluster_entry.handlers import ClusterEntryHandler from backend.db_services.cluster_entry.serializers import ModifyClusterEntrySerializer, RetrieveClusterEntrySLZ -from backend.db_services.dbbase.resources.query import ListRetrieveResource -from backend.flow.utils.dns_manage import DnsManage from backend.iam_app.dataclass.actions import ActionEnum from backend.iam_app.handlers.drf_perm.base import ( BizDBTypeResourceActionPermission, @@ -65,13 +64,10 @@ def get_default_permission_class(self) -> list: @action(methods=["POST"], detail=False, serializer_class=ModifyClusterEntrySerializer) def refresh_cluster_domain(self, request, *args, **kwargs): data = self.params_validate(self.get_serializer_class()) - cluster = Cluster.objects.get(id=data["cluster_id"]) - for detail in data["cluster_entry_details"]: - if detail["cluster_entry_type"] == ClusterEntryType.DNS: - DnsManage(cluster.bk_biz_id, cluster.bk_cloud_id).refresh_cluster_domain( - detail["domain_name"], detail["target_instances"] - ) - return Response({}) + ClusterEntryHandler(cluster_id=data["cluster_id"]).refresh_cluster_domain( + cluster_entry_details=data["cluster_entry_details"] + ) + return Response() @common_swagger_auto_schema( operation_summary=_("获取集群入口列表"), @@ -87,18 +83,8 @@ def refresh_cluster_domain(self, request, *args, **kwargs): ) def get_cluster_entries(self, request, *args, **kwargs): """获取集群入口列表""" - - cluster = Cluster.objects.get(id=self.validated_data["cluster_id"]) - - cluster_entry_type = self.validated_data.get("entry_type") - extra = {"cluster_entry_type": cluster_entry_type} if cluster_entry_type else {} - cluster_entries = ListRetrieveResource.query_cluster_entry_details( - { - "id": cluster.id, - "bk_cloud_id": cluster.bk_cloud_id, - "bk_biz_id": cluster.bk_biz_id, - }, - **extra + data = self.params_validate(self.get_serializer_class()) + cluster_entries = ClusterEntryHandler(cluster_id=data["cluster_id"]).get_cluster_entries( + data["bk_biz_id"], data.get("entry_type") ) - return Response(cluster_entries) diff --git a/dbm-ui/backend/db_services/mysql/cluster/handlers.py b/dbm-ui/backend/db_services/mysql/cluster/handlers.py index 5d935e1183..489acb4eca 100644 --- a/dbm-ui/backend/db_services/mysql/cluster/handlers.py +++ b/dbm-ui/backend/db_services/mysql/cluster/handlers.py @@ -85,6 +85,11 @@ def _fill_spider_instance_info(_cluster: Cluster, _cluster_info: Dict): filter_conditions = Q() for cluster_filter in cluster_filters: filter_conditions |= Q(**cluster_filter.export_filter_conditions()) + # 限制业务和集群类型只能是mysql & tendbcluster + filter_conditions &= Q( + cluster_type__in=[ClusterType.TenDBSingle, ClusterType.TenDBHA, ClusterType.TenDBCluster], + bk_biz_id=self.bk_biz_id, + ) clusters: QuerySet = Cluster.objects.prefetch_related("storageinstance_set", "proxyinstance_set").filter( filter_conditions diff --git a/dbm-ui/backend/db_services/mysql/cluster/views.py b/dbm-ui/backend/db_services/mysql/cluster/views.py index 642348c1d9..fdb3afc0d4 100644 --- a/dbm-ui/backend/db_services/mysql/cluster/views.py +++ b/dbm-ui/backend/db_services/mysql/cluster/views.py @@ -49,11 +49,8 @@ class ClusterViewSet(BaseClusterViewSet): def query_clusters(self, request, bk_biz_id): # TODO: Deprecated, 这个视图方法将被移除,请不要调用 validated_data = 
self.params_validate(self.get_serializer_class()) - return Response( - ClusterServiceHandler(bk_biz_id).query_clusters( - [ClusterFilter.from_dict(filter_dict) for filter_dict in validated_data["cluster_filters"]] - ) - ) + cluster_filters = [ClusterFilter.from_dict(filter_dict) for filter_dict in validated_data["cluster_filters"]] + return Response(ClusterServiceHandler(bk_biz_id).query_clusters(cluster_filters=cluster_filters)) @common_swagger_auto_schema( operation_summary=_("查询tendbcluster集群的remote相关角色机器"), diff --git a/dbm-ui/backend/db_services/mysql/permission/db_account/handlers.py b/dbm-ui/backend/db_services/mysql/permission/db_account/handlers.py index 1b86a57444..702e0b5ce4 100644 --- a/dbm-ui/backend/db_services/mysql/permission/db_account/handlers.py +++ b/dbm-ui/backend/db_services/mysql/permission/db_account/handlers.py @@ -10,10 +10,13 @@ """ import logging +from typing import Any, Optional from django.utils.translation import ugettext as _ +from backend.components import DBPrivManagerApi from backend.db_services.dbpermission.constants import PrivilegeType +from backend.db_services.dbpermission.db_account.dataclass import AccountRuleMeta from backend.db_services.dbpermission.db_account.handlers import AccountHandler from backend.db_services.mysql.permission.exceptions import DBPermissionBaseException @@ -25,6 +28,25 @@ class MySQLAccountHandler(AccountHandler): 封装账号相关的处理操作 """ + def pre_check_add_account_rule(self, account_rule: AccountRuleMeta) -> Optional[Any]: + """ + - 添加账号规则前置检查 + @param account_rule: 账号规则元信息 + """ + account_rule_params = { + "bk_biz_id": self.bk_biz_id, + "operator": self.operator, + "cluster_type": self.account_type, + "account_id": account_rule.account_id, + "priv": account_rule.privilege, + "dbname": account_rule.access_db, + } + resp = DBPrivManagerApi.pre_check_add_account_rule(params=account_rule_params, raw=True) + # 如果不允许执行,说明前置检查失败,抛出message + if not resp["data"]["force_run"]: + raise DBPermissionBaseException(_("创建授权规则前置检查失败,错误信息: {}").format(resp["message"])) + return {"force_run": resp["data"]["force_run"], "warning": resp["message"]} + def has_high_risk_privileges(self, rule_sets): """ - 判断是否有高危权限 diff --git a/dbm-ui/backend/db_services/mysql/permission/db_account/views.py b/dbm-ui/backend/db_services/mysql/permission/db_account/views.py index 92ab6b55be..58d379d15a 100644 --- a/dbm-ui/backend/db_services/mysql/permission/db_account/views.py +++ b/dbm-ui/backend/db_services/mysql/permission/db_account/views.py @@ -8,9 +8,23 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
""" -from backend.db_services.dbpermission.constants import AccountType -from backend.db_services.dbpermission.db_account.views import BaseDBAccountViewSet +from django.utils.translation import ugettext as _ +from rest_framework.decorators import action + +from backend.bk_web.swagger import common_swagger_auto_schema +from backend.db_services.dbpermission.db_account.serializers import AddAccountRuleSerializer +from backend.db_services.dbpermission.db_account.views import SWAGGER_TAG, BaseDBAccountViewSet +from backend.db_services.mysql.permission.db_account.handlers import MySQLAccountHandler class DBAccountViewSet(BaseDBAccountViewSet): - account_type = AccountType.MYSQL + account_handler = MySQLAccountHandler + + @common_swagger_auto_schema( + operation_summary=_("添加账号规则前置检查"), request_body=AddAccountRuleSerializer(), tags=[SWAGGER_TAG] + ) + @action(methods=["POST"], detail=False, serializer_class=AddAccountRuleSerializer) + def pre_check_add_account_rule(self, request, bk_biz_id): + return self._view_common_handler( + request, bk_biz_id, self.account_rule_meta, self.account_handler.pre_check_add_account_rule.__name__ + ) diff --git a/dbm-ui/backend/db_services/mysql/sqlparse/__init__.py b/dbm-ui/backend/db_services/mysql/sqlparse/__init__.py new file mode 100644 index 0000000000..aa5085c628 --- /dev/null +++ b/dbm-ui/backend/db_services/mysql/sqlparse/__init__.py @@ -0,0 +1,10 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" diff --git a/dbm-ui/backend/db_services/mysql/sqlparse/handlers.py b/dbm-ui/backend/db_services/mysql/sqlparse/handlers.py new file mode 100644 index 0000000000..8f0ac01363 --- /dev/null +++ b/dbm-ui/backend/db_services/mysql/sqlparse/handlers.py @@ -0,0 +1,91 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" + +import re + +import sqlparse + +from backend.utils.md5 import count_md5 + + +class SQLParseHandler: + def __init__(self): + self.sql_items = [] + self.commands = set() + self.tables = set() + self.table_token = None + + def parse_tokens(self, tokens): + """ + parse token,用于递归解析 + """ + self.table_token = False + for token in tokens: + if token.ttype in [sqlparse.tokens.DDL, sqlparse.tokens.DML]: + self.commands.add(token.value.upper()) + + # 提取表名 + if token.is_keyword: + if token.value.upper() in ["FROM", "UPDATE", "INTO", "TABLE", "JOIN"]: + self.table_token = True + elif self.table_token: + sub_tokens = getattr(token, "tokens", []) + if isinstance(token, (sqlparse.sql.Identifier, sqlparse.sql.IdentifierList)): + if not any( + isinstance(x, sqlparse.sql.Parenthesis) or "SELECT" in x.value.upper() for x in sub_tokens + ): + fr = "".join(str(j) for j in token if j.value not in {"as", "\n"}) + for t in re.findall(r"(?:\w+\.\w+|\w+)\s+\w+|(?:\w+\.\w+|\w+)", fr): + self.tables.add(t.split()[0]) + self.table_token = False + elif isinstance(token, sqlparse.sql.Function): + for _token in sub_tokens: + if isinstance(_token, sqlparse.sql.Identifier): + self.tables.add(_token.value) + self.table_token = False + + elif token.ttype == sqlparse.tokens.Punctuation: + self.table_token = False + + if token.is_group: + self.parse_tokens(token.tokens) + else: + if token.ttype.parent == sqlparse.tokens.Token.Literal.String: + self.sql_items.append("'?'") + elif token.ttype.parent == sqlparse.tokens.Token.Literal.Number: + self.sql_items.append("?") + else: + self.sql_items.append(token.value) + + def parse_sql(self, sql: str) -> dict: + """ + 解析 SQL + """ + parsed_sqls = sqlparse.parse(sql) + if len(parsed_sqls) == 0: + return {} + tokens = parsed_sqls[0].tokens + self.parse_tokens(tokens=tokens) + digest_sql = " ".join(self.sql_items) + for char in ["\r", "\n", "\t"]: + digest_sql = digest_sql.replace(char, " ") + sql = sql.replace(char, " ") + digest_sql = re.sub(r"\s+", " ", digest_sql) + sql = re.sub(r"\s+", " ", sql) + query_digest_md5 = count_md5(digest_sql) + return { + "command": ",".join(sorted(self.commands)), + "query_string": sql.strip(" "), + "query_digest_text": digest_sql.strip(" "), + "query_digest_md5": query_digest_md5, + "table_name": ",".join(sorted(self.tables)), + "query_length": len(sql), + } diff --git a/dbm-ui/backend/db_services/mysql/sqlparse/views.py b/dbm-ui/backend/db_services/mysql/sqlparse/views.py new file mode 100644 index 0000000000..f595d1f5a7 --- /dev/null +++ b/dbm-ui/backend/db_services/mysql/sqlparse/views.py @@ -0,0 +1,15 @@ +# -*- coding:UTF-8 -*- +import json + +from blueapps.account.decorators import login_exempt +from django.http import JsonResponse +from django.views.decorators.csrf import csrf_exempt + +from backend.db_services.mysql.sqlparse.handlers import SQLParseHandler + + +@login_exempt +@csrf_exempt +def parse_sql(request): + sql = json.loads(request.body.decode()).get("content", "") + return JsonResponse(SQLParseHandler().parse_sql(sql=sql)) diff --git a/dbm-ui/backend/db_services/mysql/urls.py b/dbm-ui/backend/db_services/mysql/urls.py index 60f948cc95..ef14a1ca2b 100644 --- a/dbm-ui/backend/db_services/mysql/urls.py +++ b/dbm-ui/backend/db_services/mysql/urls.py @@ -8,7 +8,9 @@ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
""" -from django.urls import include, path +from django.urls import include, path, re_path + +from backend.db_services.mysql.sqlparse.views import parse_sql urlpatterns = [ path("bizs//", include("backend.db_services.mysql.resources.urls")), @@ -20,4 +22,5 @@ path("bizs//", include("backend.db_services.mysql.fixpoint_rollback.urls")), path("bizs//", include("backend.db_services.mysql.open_area.urls")), path("bizs//", include("backend.db_services.mysql.dumper.urls")), + re_path("^parse_sql/?$", parse_sql, name="parse_sql"), ] diff --git a/dbm-ui/backend/db_services/partition/handlers.py b/dbm-ui/backend/db_services/partition/handlers.py index a935a4a33c..ae51a03ebf 100644 --- a/dbm-ui/backend/db_services/partition/handlers.py +++ b/dbm-ui/backend/db_services/partition/handlers.py @@ -249,3 +249,7 @@ def _verify_valid_index(_index_keys, _field): raise DBPartitionInvalidFieldException( _("【{}】【{}】分区字段{}与该表对应的字段类型不匹配").format(db, table, partition_column) ) + + # 如果表没有主键 or 唯一键,需要提示用户分区执行会锁表 + if not index_data: + return _("表没有主键或者唯一键,将表改造为分区表的过程中会锁表,会阻塞查询、删除、修改、添加、表结构变更等语句") diff --git a/dbm-ui/backend/db_services/redis/toolbox/handlers.py b/dbm-ui/backend/db_services/redis/toolbox/handlers.py index d8973cba54..b69a7a2755 100644 --- a/dbm-ui/backend/db_services/redis/toolbox/handlers.py +++ b/dbm-ui/backend/db_services/redis/toolbox/handlers.py @@ -18,6 +18,7 @@ from backend import env from backend.db_meta.enums import ClusterType, InstanceRole +from backend.db_meta.enums.comm import RedisVerUpdateNodeType from backend.db_meta.models import Cluster, ProxyInstance, StorageInstance, StorageInstanceTuple from backend.db_services.ipchooser.handlers.host_handler import HostHandler from backend.db_services.ipchooser.query.resource import ResourceQueryHelper @@ -26,6 +27,12 @@ SQL_QUERY_INSTANCES, SQL_QUERY_MASTER_SLAVE_STATUS, ) +from backend.flow.utils.redis.redis_proxy_util import ( + get_cluster_proxy_version, + get_cluster_redis_version, + get_proxy_version_names_by_cluster_type, + get_storage_version_names_by_cluster_type, +) from backend.ticket.constants import InstanceType from backend.ticket.models import ClusterOperateRecord from backend.utils.basic import dictfetchall @@ -269,3 +276,19 @@ def query_cluster_ips(self, limit=None, offset=None, cluster_id=None, ip=None, r response = {"count": total_count, "results": ips} return response + + @classmethod + def get_cluster_versions_with_cluster_id(cls, cluster_id: int, node_type: str): + """根据cluster id获取集群现存版本""" + if node_type == RedisVerUpdateNodeType.Backend.value: + return [get_cluster_redis_version(cluster_id)] + else: + return get_cluster_proxy_version(cluster_id) + + @classmethod + def get_cluster_versions_with_cluster_type(cls, cluster_type: str, node_type: str): + """根据cluster类型获取版本信息""" + if node_type == RedisVerUpdateNodeType.Backend.value: + return get_storage_version_names_by_cluster_type(cluster_type, trimSuffix=True) + else: + return get_proxy_version_names_by_cluster_type(cluster_type, trimSuffix=True) diff --git a/dbm-ui/backend/db_services/redis/toolbox/serializers.py b/dbm-ui/backend/db_services/redis/toolbox/serializers.py index bb7edd2649..b2c410fb6a 100644 --- a/dbm-ui/backend/db_services/redis/toolbox/serializers.py +++ b/dbm-ui/backend/db_services/redis/toolbox/serializers.py @@ -12,6 +12,7 @@ from rest_framework import serializers from backend.db_meta.enums import InstanceStatus +from backend.db_meta.enums.comm import RedisVerUpdateNodeType class QueryByClusterSerializer(serializers.Serializer): @@ -209,3 +210,9 @@ 
diff --git a/dbm-ui/backend/db_services/redis/toolbox/serializers.py b/dbm-ui/backend/db_services/redis/toolbox/serializers.py
index bb7edd2649..b2c410fb6a 100644
--- a/dbm-ui/backend/db_services/redis/toolbox/serializers.py
+++ b/dbm-ui/backend/db_services/redis/toolbox/serializers.py
@@ -12,6 +12,7 @@
 from rest_framework import serializers

 from backend.db_meta.enums import InstanceStatus
+from backend.db_meta.enums.comm import RedisVerUpdateNodeType

 class QueryByClusterSerializer(serializers.Serializer):
@@ -209,3 +210,9 @@ class Meta:
                 "status": "running",
             }
         }
+
+
+class GetClusterVersionSerializer(serializers.Serializer):
+    cluster_id = serializers.IntegerField(help_text=_("集群ID"), required=False)
+    cluster_type = serializers.CharField(help_text=_("集群类型"), required=False)
+    node_type = serializers.ChoiceField(help_text=_("集群节点类型"), choices=RedisVerUpdateNodeType.get_choices())
diff --git a/dbm-ui/backend/db_services/redis/toolbox/views.py b/dbm-ui/backend/db_services/redis/toolbox/views.py
index d724658fd5..cf88e7d728 100644
--- a/dbm-ui/backend/db_services/redis/toolbox/views.py
+++ b/dbm-ui/backend/db_services/redis/toolbox/views.py
@@ -17,6 +17,7 @@
 from backend.bk_web.swagger import common_swagger_auto_schema
 from backend.db_services.redis.toolbox.handlers import ToolboxHandler
 from backend.db_services.redis.toolbox.serializers import (
+    GetClusterVersionSerializer,
     QueryByClusterResultSerializer,
     QueryByClusterSerializer,
     QueryByIpResultSerializer,
@@ -96,3 +97,18 @@ def query_master_slave_pairs(self, request, bk_biz_id, **kwargs):
     @action(methods=["GET"], detail=False, serializer_class=None, pagination_class=None)
     def query_cluster_list(self, request, bk_biz_id, **kwargs):
         return Response(ToolboxHandler(bk_biz_id).query_cluster_list())
+
+    @common_swagger_auto_schema(
+        operation_summary=_("查询集群版本信息"),
+        query_serializer=GetClusterVersionSerializer(),
+        tags=[SWAGGER_TAG],
+    )
+    @action(methods=["GET"], detail=False, serializer_class=GetClusterVersionSerializer, pagination_class=None)
+    def get_cluster_versions(self, request, bk_biz_id, **kwargs):
+        data = self.params_validate(self.get_serializer_class())
+        if "cluster_id" in data:
+            cluster_id, node_type = data["cluster_id"], data["node_type"]
+            return Response(ToolboxHandler.get_cluster_versions_with_cluster_id(cluster_id, node_type))
+        else:
+            cluster_type, node_type = data["cluster_type"], data["node_type"]
+            return Response(ToolboxHandler.get_cluster_versions_with_cluster_type(cluster_type, node_type))
diff --git a/dbm-ui/backend/dbm_init/json_files/bklog/mysql_slowlog.json b/dbm-ui/backend/dbm_init/json_files/bklog/mysql_slowlog.json
index 1bdbcf2aa6..87bbc808f0 100644
--- a/dbm-ui/backend/dbm_init/json_files/bklog/mysql_slowlog.json
+++ b/dbm-ui/backend/dbm_init/json_files/bklog/mysql_slowlog.json
@@ -13,7 +13,7 @@
     "target_nodes": [],
     "etl_params": {
         "retain_original_text": true,
-        "separator_regexp": "(?s)#\\s+User@Host:\\s+(?P<user>\\w+)\\[\\w+\\]\\s+@\\s+\\[(?P<ip>[^\\]]+)\\].*?Query_time:\\s+(?P<query_time>\\d+\\.\\d+)\\s+Lock_time:\\s+(?P<lock_time>\\d+\\.\\d+)\\s+Rows_sent:\\s+(?P<rows_sent>\\d+)\\s+Rows_examined:\\s+(?P<rows_examined>\\d+).*?SET timestamp=(?P<timestamp>\\d+);.*?(?P<sql_text>.*;)"
+        "separator_regexp": "(?s)#\\s+User@Host:\\s+(?P<user>\\w+)\\[\\w+\\]\\s+@\\s+\\[(?P<ip>[^\\]]+)\\].*?(Schema:\\s+(?P<schema>\\w+).*?)?Query_time:\\s+(?P<query_time>\\d+\\.\\d+)\\s+Lock_time:\\s+(?P<lock_time>\\d+\\.\\d+)\\s+Rows_sent:\\s+(?P<rows_sent>\\d+)\\s+Rows_examined:\\s+(?P<rows_examined>\\d+).*?SET timestamp=(?P<timestamp>\\d+);.*?(?P<sql_text>.*;)"
     },
     "fields": [
         {
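# Aside: the named capture groups above were restored from the field labels the
# collector extracts (the angle brackets had been stripped from the document).
# A quick, illustrative check that the new regexp also matches entries carrying
# an optional "Schema:" line; the sample slow-log text is made up.
import re

pattern = re.compile(
    r"(?s)#\s+User@Host:\s+(?P<user>\w+)\[\w+\]\s+@\s+\[(?P<ip>[^\]]+)\]"
    r".*?(Schema:\s+(?P<schema>\w+).*?)?Query_time:\s+(?P<query_time>\d+\.\d+)"
    r"\s+Lock_time:\s+(?P<lock_time>\d+\.\d+)\s+Rows_sent:\s+(?P<rows_sent>\d+)"
    r"\s+Rows_examined:\s+(?P<rows_examined>\d+).*?SET timestamp=(?P<timestamp>\d+);"
    r".*?(?P<sql_text>.*;)"
)

sample = (
    "# User@Host: dba[dba] @ [127.0.0.1]\n"
    "# Schema: testdb Last_errno: 0\n"
    "# Query_time: 1.000000 Lock_time: 0.000100 Rows_sent: 1 Rows_examined: 10\n"
    "SET timestamp=1700000000;\n"
    "select sleep(1);"
)
m = pattern.search(sample)
assert m and m.group("schema") == "testdb"
assert m.group("sql_text").strip() == "select sleep(1);"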
diff --git a/dbm-ui/backend/flow/consts.py b/dbm-ui/backend/flow/consts.py
index 874465ac8f..62a75f4a71 100644
--- a/dbm-ui/backend/flow/consts.py
+++ b/dbm-ui/backend/flow/consts.py
@@ -396,6 +396,8 @@ class DBActuatorActionEnum(str, StructuredEnum):
     StandardizeMySQLInstance = EnumField("standardize-mysql", _("标准化MySQL实例"))
     StandardizeTenDBHAProxy = EnumField("standardize-proxy", _("标准化Proxy实例"))
     Upgrade = EnumField("upgrade", _("本地升级"))
+    MysqlDataMigrateDump = EnumField("mysql_data_migrate_dump", _("Mysql数据迁移导出库"))
+    MysqlDataMigrateImport = EnumField("mysql_data_migrate_import", _("Mysql数据迁移导入库"))


 class RedisActuatorActionEnum(str, StructuredEnum):
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/master_and_slave_switch.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/master_and_slave_switch.py
index b49177977e..bccf4d9a9c 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/master_and_slave_switch.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/common/master_and_slave_switch.py
@@ -76,7 +76,7 @@ def master_and_slave_switch(root_id: str, ticket_data: dict, cluster: Cluster, c
     # declare a sub-flow per cluster
     cluster_switch_sub_pipeline = SubBuilder(root_id=root_id, data=copy.deepcopy(switch_sub_flow_context))

-    # pre-checks before switching; for master/slave cloning, client connection and checksum checks run by default
+    # pre-checks before switching; for master/slave migration, client connection and checksum checks run by default
     sub_flow = check_sub_flow(
         uid=ticket_data["uid"],
         root_id=root_id,
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_data_migrate_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_data_migrate_flow.py
new file mode 100644
index 0000000000..9bb264afd9
--- /dev/null
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_data_migrate_flow.py
@@ -0,0 +1,202 @@
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+""" + +from dataclasses import asdict +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.configuration.constants import DBType +from backend.core.consts import BK_PKG_INSTALL_PATH +from backend.db_meta.enums import ClusterType, InstanceInnerRole +from backend.db_meta.exceptions import ClusterNotExistException, DBMetaException +from backend.db_meta.models import Cluster +from backend.db_services.mysql.sql_import.constants import BKREPO_SQLFILE_PATH +from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +class MysqlDataMigrateFlow(object): + def __init__(self, root_id: str, data: Optional[Dict]): + """ + 此flow用于数据迁移 + 1个源集群迁移数据到多个目标集群 + tenDBHA导出导入库表结构、数据都在主db上进行 + @param root_id : 任务流程定义的root_id + @param data : 单据传递参数 + """ + self.root_id = root_id + self.data = data + self.data["uid"] = self.data.get("uid") or self.root_id + self.uid = self.data["uid"] + + self.work_dir = f"{BK_PKG_INSTALL_PATH}/mysql_open_area" + self.migrate_tar_file_name = f"{self.root_id}_migrate.tar.gz" + self.migrate_md5sum_file_name = f"{self.root_id}_migrate.md5sum" + + def __get_cluster_info(self, cluster_id: int, bk_biz_id: int) -> dict: + """ + 获取集群基本信息 source与target共用 + @param cluster_id: 集群cluster_id + @param bk_biz_id: 业务id + @return: + """ + try: + cluster = Cluster.objects.get(id=cluster_id, bk_biz_id=bk_biz_id) + except Cluster.DoesNotExist: + raise ClusterNotExistException(cluster_id=cluster_id, bk_biz_id=bk_biz_id, message=_("集群不存在")) + + if cluster.cluster_type == ClusterType.TenDBHA.value: + ip_port = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.MASTER).ip_port + else: + raise DBMetaException(message=_("集群实例类型不适用于开区")) + + return { + "cluster_id": cluster.id, + "bk_cloud_id": cluster.bk_cloud_id, + "cluster_type": cluster.cluster_type, + "ip": ip_port.split(":")[0], + "port": int(ip_port.split(":")[1]), + "root_id": self.root_id, + } + + def __get_target_cluster(self, info: dict) -> list: + """ + @param target_cluster_ids: 列表,循环获取详细集群信息 + @return: 获取目标集群相关信息 + """ + target_clusters = [] + for tc_id in info["target_clusters"]: + target_cluster = self.__get_cluster_info(cluster_id=tc_id, bk_biz_id=self.data["bk_biz_id"]) + target_cluster["open_area_param"] = [{"db_list": info["db_list"], "schema": "migrate_database"}] + target_clusters.append(target_cluster) + + return target_clusters + + def __get_source_cluster(self, info: dict) -> dict: + """ + @param target_cluster_ids: 列表,循环获取详细集群信息 + @return: 获取目标集群相关信息 + """ + # 返回字典类型 + source_cluster = self.__get_cluster_info(cluster_id=info["source_cluster"], bk_biz_id=self.data["bk_biz_id"]) + # 字典增加键值 + source_cluster["open_area_param"] = [{"db_list": info["db_list"]}] + return source_cluster + + def __get_exec_ip_list(self, source_cluster: dict, target_clusters: list) -> list: + """ + 过滤需要下发act的IP + @param source_cluster: + @param target_clusters: + @return: + """ + exec_ip_list = [] + exec_ip_list.append(source_cluster["ip"]) + for tc in target_clusters: + if tc["ip"] not in exec_ip_list: + 
exec_ip_list.append(tc["ip"]) + + return exec_ip_list + + def __get_all_cluster_id(self) -> list: + """ + 获取所有集群id,包括原集群与目标集群,用于密码随机化 + @return: + """ + cluster_ids = [] + for info in self.data["infos"]: + cluster_ids.append(info["source_cluster"]) + for target_cluster in info["target_clusters"]: + cluster_ids.append(target_cluster) + + return cluster_ids + + def mysql_data_migrate_flow(self): + cluster_ids = self.__get_all_cluster_id() + pipeline = Builder(root_id=self.root_id, data=self.data, need_random_pass_cluster_ids=list(set(cluster_ids))) + + sub_pipelines = [] + for info in self.data["infos"]: + source_cluster = self.__get_source_cluster(info) + target_clusters = self.__get_target_cluster(info) + exec_ip_list = self.__get_exec_ip_list(source_cluster, target_clusters) + + sub_pipeline = SubBuilder(root_id=self.root_id, data=self.data) + sub_pipeline.add_act( + act_name=_("下发db_actuator介质"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=source_cluster["bk_cloud_id"], + exec_ip=exec_ip_list, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + + sub_pipeline.add_act( + act_name=_("从源实例获取库"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + bk_cloud_id=source_cluster["bk_cloud_id"], + cluster_type=source_cluster["cluster_type"], + cluster=source_cluster, + exec_ip=source_cluster["ip"], + get_mysql_payload_func=MysqlActPayload.get_mysql_data_migrate_dump_payload.__name__, + ) + ), + ) + + # 源集群不需要下发,移除其ip + exec_ip_list.remove(source_cluster["ip"]) + sub_pipeline.add_act( + act_name=_("下发库表文件到目标实例"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=source_cluster["bk_cloud_id"], + exec_ip=exec_ip_list, + file_target_path=self.work_dir, + file_list=GetFileList(db_type=DBType.MySQL).mysql_import_sqlfile( + path=BKREPO_SQLFILE_PATH, + filelist=[self.migrate_tar_file_name, self.migrate_md5sum_file_name], + ), + ) + ), + ) + acts_list = [] + for target_cluster in target_clusters: + acts_list.append( + { + "act_name": _("向目标实例导入库"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + bk_cloud_id=target_cluster["bk_cloud_id"], + cluster_type=target_cluster["cluster_type"], + cluster=target_cluster, + exec_ip=target_cluster["ip"], + get_mysql_payload_func=MysqlActPayload.get_mysql_data_migrate_import_payload.__name__, + ) + ), + } + ) + sub_pipeline.add_parallel_acts(acts_list=acts_list) + + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("数据迁移流程"))) + + pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + pipeline.run_pipeline(is_drop_random_user=True) diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py index 00573b47fb..5a3633b5d8 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py @@ -22,6 +22,7 @@ from backend.db_package.models import Package from backend.flow.consts import MediumEnum from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import ( build_surrounding_apps_sub_flow, install_mysql_in_cluster_sub_flow, @@ -36,9 
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py
index 00573b47fb..5a3633b5d8 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_flow.py
@@ -22,6 +22,7 @@
 from backend.db_package.models import Package
 from backend.flow.consts import MediumEnum
 from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder
+from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList
 from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import (
     build_surrounding_apps_sub_flow,
     install_mysql_in_cluster_sub_flow,
@@ -36,9 +37,15 @@
 from backend.flow.plugins.components.collections.mysql.clear_machine import MySQLClearMachineComponent
 from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent
 from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent
+from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent
 from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs
 from backend.flow.utils.mysql.common.mysql_cluster_info import get_ports, get_version_and_charset
-from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, DBMetaOPKwargs, ExecActuatorKwargs
+from backend.flow.utils.mysql.mysql_act_dataclass import (
+    ClearMachineKwargs,
+    DBMetaOPKwargs,
+    DownloadMediaKwargs,
+    ExecActuatorKwargs,
+)
 from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload
 from backend.flow.utils.mysql.mysql_context_dataclass import ClusterInfoContext
 from backend.flow.utils.mysql.mysql_db_meta import MySQLDBMeta
@@ -302,6 +309,19 @@ def deploy_migrate_cluster_flow(self):
                 "backend_port": self.data["ports"],
                 "bk_cloud_id": self.data["bk_cloud_id"],
             }
+
+            uninstall_svr_sub_pipeline.add_act(
+                act_name=_("下发db-actor到节点{}".format(ip)),
+                act_component_code=TransFileComponent.code,
+                kwargs=asdict(
+                    DownloadMediaKwargs(
+                        bk_cloud_id=cluster_class.bk_cloud_id,
+                        exec_ip=[ip],
+                        file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(),
+                    )
+                ),
+            )
+
             uninstall_svr_sub_pipeline.add_act(
                 act_name=_("清理实例级别周边配置"),
                 act_component_code=ExecuteDBActuatorScriptComponent.code,
diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
index 22989fb472..8539fbf143 100644
--- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
+++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_migrate_cluster_remote_flow.py
@@ -24,6 +24,7 @@
 from backend.db_services.mysql.fixpoint_rollback.handlers import FixPointRollbackHandler
 from backend.flow.consts import MediumEnum
 from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder
+from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList
 from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import (
     build_surrounding_apps_sub_flow,
     install_mysql_in_cluster_sub_flow,
@@ -37,9 +38,15 @@
 from backend.flow.plugins.components.collections.mysql.clear_machine import MySQLClearMachineComponent
 from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent
 from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent
+from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent
 from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs
 from backend.flow.utils.mysql.common.mysql_cluster_info import get_ports, get_version_and_charset
-from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, DBMetaOPKwargs, ExecActuatorKwargs
+from backend.flow.utils.mysql.mysql_act_dataclass import (
+    ClearMachineKwargs,
+    DBMetaOPKwargs,
+    DownloadMediaKwargs,
+    ExecActuatorKwargs,
+)
 from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload
 from backend.flow.utils.mysql.mysql_context_dataclass import ClusterInfoContext
 from
backend.flow.utils.mysql.mysql_db_meta import MySQLDBMeta @@ -291,6 +298,19 @@ def migrate_cluster_flow(self): "backend_port": self.data["ports"], "bk_cloud_id": self.data["bk_cloud_id"], } + + uninstall_svr_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(ip)), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=cluster_class.bk_cloud_id, + exec_ip=[ip], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + uninstall_svr_sub_pipeline.add_act( act_name=_("清理实例级别周边配置"), act_component_code=ExecuteDBActuatorScriptComponent.code, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py index 227ad0a26b..5d23dda174 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py @@ -243,7 +243,7 @@ def mysql_open_area_flow(self): act_component_code=TransFileComponent.code, kwargs=asdict( DownloadMediaKwargs( - bk_cloud_id=0, + bk_cloud_id=source_cluster_schema["bk_cloud_id"], exec_ip=exec_ip_list, file_target_path=self.work_dir, file_list=GetFileList(db_type=DBType.MySQL).mysql_import_sqlfile( diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_flow.py index 385537bd36..83f3655531 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_flow.py @@ -284,6 +284,18 @@ def deploy_restore_slave_flow(self): ), ) + uninstall_svr_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(self.data["old_slave_ip"])), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=cluster_class.bk_cloud_id, + exec_ip=[self.data["old_slave_ip"]], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + uninstall_svr_sub_pipeline.add_act( act_name=_("清理机器配置"), act_component_code=MySQLClearMachineComponent.code, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_remote_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_remote_flow.py index 942bda9565..83ff83274d 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_remote_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_restore_slave_remote_flow.py @@ -260,6 +260,17 @@ def tendb_ha_restore_slave_flow(self): ) ), ) + uninstall_svr_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(self.data["old_slave_ip"])), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=cluster_class.bk_cloud_id, + exec_ip=[self.data["old_slave_ip"]], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) uninstall_svr_sub_pipeline.add_act( act_name=_("清理机器配置"), act_component_code=MySQLClearMachineComponent.code, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/remote_slave_recover.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/remote_slave_recover.py index 8ba3efc823..59e2bccbc7 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/spider/remote_slave_recover.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/remote_slave_recover.py @@ -16,10 +16,12 @@ from django.utils.crypto import get_random_string from django.utils.translation import 
ugettext as _ +from backend.configuration.constants import DBType from backend.constants import IP_PORT_DIVIDER from backend.db_meta.enums import ClusterType from backend.db_meta.models import Cluster from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import ( build_surrounding_apps_sub_flow, install_mysql_in_cluster_sub_flow, @@ -30,13 +32,19 @@ from backend.flow.plugins.components.collections.common.pause import PauseComponent from backend.flow.plugins.components.collections.mysql.clear_machine import MySQLClearMachineComponent from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.plugins.components.collections.spider.spider_db_meta import SpiderDBMetaComponent from backend.flow.plugins.components.collections.spider.switch_remote_slave_routing import ( SwitchRemoteSlaveRoutingComponent, ) from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs from backend.flow.utils.mysql.common.mysql_cluster_info import get_version_and_charset -from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, DBMetaOPKwargs, ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_dataclass import ( + ClearMachineKwargs, + DBMetaOPKwargs, + DownloadMediaKwargs, + ExecActuatorKwargs, +) from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload from backend.flow.utils.mysql.mysql_context_dataclass import ClusterInfoContext from backend.flow.utils.spider.spider_act_dataclass import InstancePairs, SwitchRemoteSlaveRoutingKwargs @@ -284,6 +292,18 @@ def tendb_remote_slave_recover(self): ) ), ) + uninstall_svr_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(self.data["source_ip"])), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=cluster_class.bk_cloud_id, + exec_ip=[self.data["source_ip"]], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + uninstall_svr_sub_pipeline.add_act( act_name=_("清理机器配置"), act_component_code=MySQLClearMachineComponent.code, diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_migrate_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_migrate_flow.py index 74d54c1d52..ec0fdef3ae 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_migrate_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_migrate_flow.py @@ -15,7 +15,9 @@ from django.utils.translation import ugettext as _ +from backend.configuration.constants import DBType from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.engine.bamboo.scene.spider.spider_remote_node_migrate import ( remote_node_migrate_sub_flow, remote_node_uninstall_sub_flow, @@ -23,7 +25,8 @@ from backend.flow.plugins.components.collections.common.pause import PauseComponent from backend.flow.plugins.components.collections.mysql.clear_machine import MySQLClearMachineComponent from backend.flow.plugins.components.collections.mysql.mysql_db_meta import MySQLDBMetaComponent -from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, 
DBMetaOPKwargs +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, DBMetaOPKwargs, DownloadMediaKwargs from backend.flow.utils.spider.spider_db_meta import SpiderDBMeta from backend.flow.utils.spider.tendb_cluster_info import get_remotedb_info @@ -163,6 +166,17 @@ def tendb_migrate(self): ) ), ) + uninstall_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(ip)), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=self.data["bk_cloud_id"], + exec_ip=[ip], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) # 下线机器 uninstall_sub_pipeline.add_act( act_name=_("清理机器配置"), diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_rebalance_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_rebalance_flow.py index 08def5aeb7..171b054095 100644 --- a/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_rebalance_flow.py +++ b/dbm-ui/backend/flow/engine/bamboo/scene/spider/spider_remotedb_rebalance_flow.py @@ -17,11 +17,13 @@ from django.utils import timezone from django.utils.translation import ugettext as _ +from backend.configuration.constants import DBType from backend.constants import IP_PORT_DIVIDER from backend.db_meta.enums import ClusterType from backend.db_meta.models import Cluster from backend.db_services.mysql.fixpoint_rollback.handlers import FixPointRollbackHandler from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList from backend.flow.engine.bamboo.scene.mysql.common.common_sub_flow import ( build_surrounding_apps_sub_flow, install_mysql_in_cluster_sub_flow, @@ -36,10 +38,16 @@ from backend.flow.plugins.components.collections.common.pause import PauseComponent from backend.flow.plugins.components.collections.mysql.clear_machine import MySQLClearMachineComponent from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent from backend.flow.plugins.components.collections.spider.spider_db_meta import SpiderDBMetaComponent from backend.flow.utils.common_act_dataclass import DownloadBackupClientKwargs from backend.flow.utils.mysql.common.mysql_cluster_info import get_version_and_charset -from backend.flow.utils.mysql.mysql_act_dataclass import ClearMachineKwargs, DBMetaOPKwargs, ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_dataclass import ( + ClearMachineKwargs, + DBMetaOPKwargs, + DownloadMediaKwargs, + ExecActuatorKwargs, +) from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload from backend.flow.utils.mysql.mysql_context_dataclass import ClusterInfoContext from backend.flow.utils.spider.spider_db_meta import SpiderDBMeta @@ -341,6 +349,19 @@ def tendb_migrate(self): ) ), ) + + uninstall_svr_sub_pipeline.add_act( + act_name=_("下发db-actor到节点{}".format(ip)), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=cluster_class.bk_cloud_id, + exec_ip=[ip], + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + uninstall_svr_sub_pipeline.add_act( act_name=_("清理机器配置"), act_component_code=MySQLClearMachineComponent.code, diff --git a/dbm-ui/backend/flow/engine/controller/mysql.py 
b/dbm-ui/backend/flow/engine/controller/mysql.py
index 1692bb7db4..0fe7f994c0 100644
--- a/dbm-ui/backend/flow/engine/controller/mysql.py
+++ b/dbm-ui/backend/flow/engine/controller/mysql.py
@@ -14,6 +14,7 @@
 from backend.flow.engine.bamboo.scene.mysql.mysql_authorize_rules import MySQLAuthorizeRules
 from backend.flow.engine.bamboo.scene.mysql.mysql_checksum import MysqlChecksumFlow
 from backend.flow.engine.bamboo.scene.mysql.mysql_clone_rules import MySQLCloneRules
+from backend.flow.engine.bamboo.scene.mysql.mysql_data_migrate_flow import MysqlDataMigrateFlow
 from backend.flow.engine.bamboo.scene.mysql.mysql_edit_config_flow import MysqlEditConfigFlow
 from backend.flow.engine.bamboo.scene.mysql.mysql_fake_sql_semantic_check import MySQLFakeSemanticCheck
 from backend.flow.engine.bamboo.scene.mysql.mysql_flashback_flow import MysqlFlashbackFlow
@@ -600,3 +601,12 @@ def mysql_upgrade_scene(self):
         """
         flow = MySQLLocalUpgradeFlow(root_id=self.root_id, data=self.ticket_data)
         flow.upgrade_mysql_flow()
+
+    def mysql_data_migrate_scene(self):
+        """
+        MySQL data migration:
+        dump data from the source cluster and import it into the target clusters.
+        @return:
+        """
+        flow = MysqlDataMigrateFlow(root_id=self.root_id, data=self.ticket_data)
+        flow.mysql_data_migrate_flow()
diff --git a/dbm-ui/backend/flow/plugins/components/collections/common/drop_random_job_user.py b/dbm-ui/backend/flow/plugins/components/collections/common/drop_random_job_user.py
index 119e50ee47..124732d706 100644
--- a/dbm-ui/backend/flow/plugins/components/collections/common/drop_random_job_user.py
+++ b/dbm-ui/backend/flow/plugins/components/collections/common/drop_random_job_user.py
@@ -74,7 +74,8 @@ def drop_jor_user(self, cluster: Cluster, root_id: str):
         try:
             # drop the localhost and local_ip users
             for instance in self._get_instance_for_cluster(cluster=cluster):
-                cmd = []
+                # turn binlog logging off first; it is re-enabled at the end
+                cmd = ["set session sql_log_bin = 0 ;"]
                 if instance["is_tdbctl"]:
                     cmd.append("set tc_admin = 0;")
                 self.log_info(f"the cluster version is {cluster.major_version}")
@@ -88,11 +89,13 @@ def drop_jor_user(self, cluster: Cluster, root_id: str):
                         f"drop user `{user}`@`localhost`;",
                         f"drop user `{user}`@`{instance['ip_port'].split(':')[0]}`;",
                     ]
+                # re-enable binlog at the end, so that reuse of the instance is not broken
+                cmd.append("set session sql_log_bin = 1 ;")
                 resp = DRSApi.rpc(
                     {
                         "addresses": [instance["ip_port"]],
                         "cmds": cmd,
-                        "force": False,
+                        "force": True,  # keep executing past errors to guarantee binlog is re-enabled
                         "bk_cloud_id": cluster.bk_cloud_id,
                     }
                 )
diff --git a/dbm-ui/backend/flow/urls.py b/dbm-ui/backend/flow/urls.py
index 4c40abac58..ccace1fdf7 100644
--- a/dbm-ui/backend/flow/urls.py
+++ b/dbm-ui/backend/flow/urls.py
@@ -93,6 +93,7 @@
 from backend.flow.views.mysql_add_slave import AddMysqlSlaveSceneApiView
 from backend.flow.views.mysql_add_slave_remote import AddMysqlSlaveRemoteSceneApiView
 from backend.flow.views.mysql_checksum import MysqlChecksumSceneApiView
+from backend.flow.views.mysql_data_migrate import MysqlDataMigrateSceneApiView
 from backend.flow.views.mysql_edit_config import MysqlEditConfigSceneApiView
 from backend.flow.views.mysql_flashback import MysqlFlashbackSceneApiView
 from backend.flow.views.mysql_ha_apply import InstallMySQLHASceneApiView
@@ -412,6 +413,7 @@
     url(r"^scene/replace_pulsar$", ReplacePulsarSceneApiView.as_view()),
     url(r"^scene/fake_install_pulsar$", FakeInstallPulsarSceneApiView.as_view()),
     url(r"^scene/import_resource_init$", ImportResourceInitStepApiView.as_view()),
+    url(r"^scene/mysql_data_migrate$", MysqlDataMigrateSceneApiView.as_view()),
     # spider
     url(r"^scene/add_spider_mnt$", AddSpiderMNTSceneApiView.as_view()),
     url(r"^scene/install_tendb_cluster$", InstallSpiderClusterSceneApiView.as_view()),
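# Aside: an illustrative sketch (not part of the change set) of the command list
# the drop_random_job_user hunk above now builds for a non-tdbctl instance with
# a single job user; the user name is made up.
cmd = [
    "set session sql_log_bin = 0 ;",          # user drops must not reach the binlog
    "drop user `job_user_xxx`@`localhost`;",
    "drop user `job_user_xxx`@`127.0.0.1`;",
    "set session sql_log_bin = 1 ;",          # always restored, even if a drop failed
]
# With "force": True, DRSApi.rpc keeps executing the remaining commands after an
# error, so the trailing "sql_log_bin = 1" is reached even when a drop fails.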
url(r"^scene/install_tendb_cluster$", InstallSpiderClusterSceneApiView.as_view()), diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py index 6363dc987d..7cf5215731 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py @@ -2060,3 +2060,69 @@ def get_standardize_tendbha_proxy_payload(**kwargs): "action": DBActuatorActionEnum.StandardizeTenDBHAProxy.value, "payload": {"general": {}, "extend": {}}, # {"runtime_account": self.account}, } + + def get_mysql_data_migrate_dump_payload(self, **kwargs): + """ + 数据迁移导出库表结构与数据 + @param kwargs: + @return: + """ + fileserver = {} + db_cloud_token = AsymmetricHandler.encrypt( + name=AsymmetricCipherConfigType.PROXYPASS.value, content=f"{self.bk_cloud_id}_dbactuator_token" + ) + nginx_ip = DBCloudProxy.objects.filter(bk_cloud_id=self.bk_cloud_id).last().internal_address + bkrepo_url = f"http://{nginx_ip}/apis/proxypass" if self.bk_cloud_id else settings.BKREPO_ENDPOINT_URL + + fileserver.update( + { + "url": bkrepo_url, + "bucket": settings.BKREPO_BUCKET, + "username": settings.BKREPO_USERNAME, + "password": settings.BKREPO_PASSWORD, + "project": settings.BKREPO_PROJECT, + "upload_path": BKREPO_SQLFILE_PATH, + } + ) + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.MysqlDataMigrateDump.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "db_cloud_token": db_cloud_token, + "dump_dir_name": f"{self.cluster['root_id']}_migrate", + "fileserver": fileserver, + "open_area_param": self.cluster["open_area_param"], + }, + }, + } + + def get_mysql_data_migrate_import_payload(self, **kwargs): + """ + 数据迁移导入库表结构与数据 + @param kwargs: + @return: + """ + return { + "db_type": DBActuatorTypeEnum.MySQL.value, + "action": DBActuatorActionEnum.MysqlDataMigrateImport.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "dump_dir_name": f"{self.cluster['root_id']}_migrate", + "open_area_param": self.cluster["open_area_param"], + }, + }, + } diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py index da4e7bcd69..f05594de35 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py @@ -274,7 +274,7 @@ def mysql_restore_remove_old_slave(self): """ # 获取cluster_types_list cluster_types = ( - Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"]) + Cluster.objects.filter(id__in=self.cluster["cluster_ids"]) .values_list("cluster_type", flat=True) .distinct() ) @@ -417,7 +417,7 @@ def mysql_ha_switch(self): # 获取cluster_types_list cluster_types = ( - Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"]) + Cluster.objects.filter(id__in=self.cluster["cluster_ids"]) .values_list("cluster_type", flat=True) .distinct() ) @@ -594,7 +594,7 @@ def mysql_cluster_migrate_remote_instance(self): """ # 获取cluster_types_list cluster_types = ( - Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"]) + Cluster.objects.filter(id__in=self.cluster["cluster_ids"]) .values_list("cluster_type", flat=True) 
diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py
index da4e7bcd69..f05594de35 100644
--- a/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py
+++ b/dbm-ui/backend/flow/utils/mysql/mysql_db_meta.py
@@ -274,7 +274,7 @@ def mysql_restore_remove_old_slave(self):
         """
         # collect the distinct cluster types
         cluster_types = (
-            Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"])
+            Cluster.objects.filter(id__in=self.cluster["cluster_ids"])
             .values_list("cluster_type", flat=True)
             .distinct()
         )
@@ -417,7 +417,7 @@ def mysql_ha_switch(self):

         # collect the distinct cluster types
         cluster_types = (
-            Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"])
+            Cluster.objects.filter(id__in=self.cluster["cluster_ids"])
             .values_list("cluster_type", flat=True)
             .distinct()
         )
@@ -594,7 +594,7 @@ def mysql_cluster_migrate_remote_instance(self):
         """
         # collect the distinct cluster types
         cluster_types = (
-            Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"])
+            Cluster.objects.filter(id__in=self.cluster["cluster_ids"])
             .values_list("cluster_type", flat=True)
             .distinct()
         )
@@ -920,7 +920,7 @@ def uninstall_instance(self):
         Update metadata after the instance has been uninstalled.
         """
         cluster_types = (
-            Cluster.objects.filter(cluster_id__in=self.cluster["cluster_ids"])
+            Cluster.objects.filter(id__in=self.cluster["cluster_ids"])
             .values_list("cluster_type", flat=True)
             .distinct()
         )
diff --git a/dbm-ui/backend/flow/views/mysql_data_migrate.py b/dbm-ui/backend/flow/views/mysql_data_migrate.py
new file mode 100644
index 0000000000..5f47e2958c
--- /dev/null
+++ b/dbm-ui/backend/flow/views/mysql_data_migrate.py
@@ -0,0 +1,32 @@
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
+import logging
+
+from rest_framework.response import Response
+
+from backend.flow.engine.controller.mysql import MySQLController
+from backend.flow.views.base import FlowTestView
+from backend.utils.basic import generate_root_id
+
+logger = logging.getLogger("root")
+
+
+class MysqlDataMigrateSceneApiView(FlowTestView):
+    """
+    MySQL data migration
+    api: /apis/v1/flow/scene/mysql_data_migrate
+    params:
+    """
+
+    def post(self, request):
+        root_id = generate_root_id()
+        controller = MySQLController(root_id=root_id, ticket_data=request.data)
+        controller.mysql_data_migrate_scene()
+        return Response({"root_id": root_id})
diff --git a/dbm-ui/backend/tests/db_services/mysql/sqlparse/__init__.py b/dbm-ui/backend/tests/db_services/mysql/sqlparse/__init__.py
new file mode 100644
index 0000000000..aa5085c628
--- /dev/null
+++ b/dbm-ui/backend/tests/db_services/mysql/sqlparse/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT
+Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
+an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
+specific language governing permissions and limitations under the License.
+"""
diff --git a/dbm-ui/backend/tests/db_services/mysql/sqlparse/test_handlers.py b/dbm-ui/backend/tests/db_services/mysql/sqlparse/test_handlers.py
new file mode 100644
index 0000000000..f16116a695
--- /dev/null
+++ b/dbm-ui/backend/tests/db_services/mysql/sqlparse/test_handlers.py
@@ -0,0 +1,636 @@
+# -*- coding: utf-8 -*-
+"""
+TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available.
+Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved.
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
+You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +from backend.db_services.mysql.sqlparse.handlers import SQLParseHandler + + +class TestSQLParseHandler: + @staticmethod + def test_select_for_update(): + sql = "select * from goods where id = 1 and name='prod11' for update;" + result = SQLParseHandler().parse_sql(sql) + assert result == { + "command": "SELECT,UPDATE", + "query_string": "select * from goods where id = 1 and name='prod11' for update;", + "query_digest_text": "select * from goods where id = ? and name = '?' for update ;", + "query_digest_md5": "cd85202ea8e7e56979472302581b9fa9", + "table_name": "goods", + "query_length": 62, + } + + @staticmethod + def test_include_punctuation(): + sql = """ + select id,'18;19',age,33454354.453 from actor where + id='dsadsadsadsadsadsadsadsadads' + and id2=12321321321 + and dt='2011-10-10'; + """ + assert SQLParseHandler().parse_sql(sql) == { + "command": "SELECT", + "query_string": "select id,'18;19',age,33454354.453 from actor where id='dsadsadsadsadsadsadsadsadads' " + "and id2=12321321321 and dt='2011-10-10';", + "query_digest_text": "select id , '?' , age , ? from actor where id = '?' and id2 = ? and dt = '?' ;", + "query_digest_md5": "49ff4686038156fb7dea8b5b2fe59ce5", + "table_name": "actor", + "query_length": 128, + } + + @staticmethod + def test_join(): + sql = ( + "select trx_state,p.command,p.state, p.user, max(TIMESTAMPDIFF(SECOND,trx_started,now())) " + "max_trx_long_time, max(p.time) max_trx_idle_time from information_schema.innodb_trx t join " + "information_schema.processlist p on t.trx_mysql_thread_id=p.id group by trx_state,command,state,user;" + ) + assert SQLParseHandler().parse_sql(sql) == { + "command": "SELECT", + "query_string": "select trx_state,p.command,p.state, p.user, max(TIMESTAMPDIFF(SECOND,trx_started,now()))" + " max_trx_long_time, max(p.time) max_trx_idle_time from information_schema.innodb_trx t " + "join information_schema.processlist p on t.trx_mysql_thread_id=p.id group by trx_state," + "command,state,user;", + "query_digest_text": "select trx_state , p . command , p . state , p . user ," + " max ( TIMESTAMPDIFF ( SECOND , trx_started , now ( ) ) ) max_trx_long_time , " + "max ( p . time ) max_trx_idle_time from information_schema . innodb_trx t " + "join information_schema . processlist p on t . trx_mysql_thread_id = p . id " + "group by trx_state , command , state , user ;", + "query_digest_md5": "3d1ebf49578c16081fb1563874177290", + "table_name": "information_schema.innodb_trx,information_schema.processlist", + "query_length": 281, + } + + @staticmethod + def test_count(): + sql = ( + "select count(*) from information_schema.INNODB_TRX where TIMESTAMPDIFF(SECOND,trx_started,now()) > 30;" + ) + assert SQLParseHandler().parse_sql(sql) == { + "command": "SELECT", + "query_string": "select count(*) from information_schema.INNODB_TRX " + "where TIMESTAMPDIFF(SECOND,trx_started,now()) > 30;", + "query_digest_text": "select count ( * ) from information_schema . INNODB_TRX " + "where TIMESTAMPDIFF ( SECOND , trx_started , now ( ) ) > ? 
;", + "query_digest_md5": "69302d896e1df76238f89e0b2dede003", + "table_name": "information_schema.INNODB_TRX", + "query_length": 102, + } + + @staticmethod + def test_rename_two_table(): + sql = ( + "select a.ip, b.port from db_meta_machine a, db_meta_storageinstance b " + "where a.cluster_type='tendbha' and a.access_layer='storage' and a.bk_host_id = b.machine_id;" + ) + assert SQLParseHandler().parse_sql(sql) == { + "command": "SELECT", + "query_string": "select a.ip, b.port from db_meta_machine a, db_meta_storageinstance b where " + "a.cluster_type='tendbha' and a.access_layer='storage' and a.bk_host_id = b.machine_id;", + "query_digest_text": "select a . ip , b . port from db_meta_machine a , db_meta_storageinstance b " + "where a . cluster_type = '?' and a . access_layer = '?' " + "and a . bk_host_id = b . machine_id ;", + "query_digest_md5": "60e7cb8bf7ed12639d66a79d8fadff96", + "table_name": "db_meta_machine,db_meta_storageinstance", + "query_length": 162, + } + + @staticmethod + def test_alert_table(): + sql = "alter table ha_agent_logs add index idx1(agent_ip, ip, port), drop index idx_ins;" + assert SQLParseHandler().parse_sql(sql) == { + "command": "ALTER,DROP", + "query_string": "alter table ha_agent_logs add index idx1(agent_ip, ip, port), drop index idx_ins;", + "query_digest_text": "alter table ha_agent_logs add index idx1 ( agent_ip , ip , port ) , " + "drop index idx_ins ;", + "query_digest_md5": "c8ce8c74903d0bd2d723e9cc73f0a364", + "table_name": "ha_agent_logs", + "query_length": 81, + } + + @staticmethod + def test_create_table(): + sql = "create table tb_instance_version_charset(version text, charset int, engines text);" + assert SQLParseHandler().parse_sql(sql) == { + "command": "CREATE", + "query_string": "create table tb_instance_version_charset(version text, charset int, engines text);", + "query_digest_text": "create table tb_instance_version_charset " + "( version text , charset int , engines text ) ;", + "query_digest_md5": "52668e3967eb9ae57d845f97381b0440", + "table_name": "tb_instance_version_charset", + "query_length": 82, + } + + @staticmethod + def test_drop_table(): + sql = "DROP TABLE tb_instance_version_charset;" + assert SQLParseHandler().parse_sql(sql) == { + "command": "DROP", + "query_string": "DROP TABLE tb_instance_version_charset;", + "query_digest_text": "DROP TABLE tb_instance_version_charset ;", + "query_digest_md5": "50ba8416364a3a8df59ec446820649bf", + "table_name": "tb_instance_version_charset", + "query_length": 39, + } + + @staticmethod + def test_truncate_table(): + sql = "TRUNCATE TABLE tb_instance_version_charset;" + assert SQLParseHandler().parse_sql(sql) == { + "command": "TRUNCATE", + "query_string": "TRUNCATE TABLE tb_instance_version_charset;", + "query_digest_text": "TRUNCATE TABLE tb_instance_version_charset ;", + "query_digest_md5": "421438b35cfc17475a7857b6427e9fd1", + "table_name": "tb_instance_version_charset", + "query_length": 43, + } + + @staticmethod + def test_insert_into(): + sql = """ + insert into tb_instance_version_charset(ip, port, version, charset, engines, addr, create_at) + values('127.0.0.1', 20003, '5.5.24-tmysql-1.6-log', 'utf8mb4', 'InnoDB', '127.0.0.1:20003', now()); + """ + assert SQLParseHandler().parse_sql(sql) == { + "command": "INSERT", + "query_string": "insert into tb_instance_version_charset(ip, port, version, charset, engines, addr, " + "create_at) values('127.0.0.1', 20003, '5.5.24-tmysql-1.6-log', 'utf8mb4', 'InnoDB', " + "'127.0.0.1:20003', now());", + "query_digest_text": "insert into 
tb_instance_version_charset ( ip , port , version , charset , engines , " + "addr , create_at ) values ( '?' , ? , '?' , '?' , '?' , '?' , now ( ) ) ;", + "query_digest_md5": "6525fca29f39de6e2f7847e6558d9e27", + "table_name": "tb_instance_version_charset", + "query_length": 195, + } + + @staticmethod + def test_insert_into_with_select(): + # TODO,此 case table name 不对 + sql = """ + INSERT IGNORE INTO gameai_llm_proxy.request_tokens (request_id, create_time) + SELECT request_id, create_time + FROM gameai_llm_proxy.request_data; + """ + print(SQLParseHandler().parse_sql(sql)) + assert SQLParseHandler().parse_sql(sql) == { + "command": "INSERT,SELECT", + "query_string": "INSERT IGNORE INTO gameai_llm_proxy.request_tokens (request_id, create_time) " + "SELECT request_id, create_time FROM gameai_llm_proxy.request_data;", + "query_digest_text": "INSERT IGNORE INTO gameai_llm_proxy . request_tokens ( request_id , create_time ) " + "SELECT request_id , create_time FROM gameai_llm_proxy . request_data ;", + "query_digest_md5": "640fe92d547930b1e7869f514aacd9e3", + "table_name": "create_time,gameai_llm_proxy.request_data,gameai_llm_proxy.request_tokens,request_id", + "query_length": 145, + } + + @staticmethod + def test_update_data(): + sql = "update db_meta_storageinstance set status='unavailable' where id in (16184,16183);" + assert SQLParseHandler().parse_sql(sql) == { + "command": "UPDATE", + "query_string": "update db_meta_storageinstance set status='unavailable' where id in (16184,16183);", + "query_digest_text": "update db_meta_storageinstance set status = '?' where id in ( ? , ? ) ;", + "query_digest_md5": "ca4129d268538e8bd332d6898eca0788", + "table_name": "db_meta_storageinstance", + "query_length": 82, + } + + @staticmethod + def test_complex_sql(): + sql = """ + SELECT --Locais Disponíveis para Abertura de Ordens de corte + COALESCE(a."Chave", b."Chave") AS "Chave", + COALESCE(a."Unidade", b."Unidade") AS "Unidade", + COALESCE(a."Fazenda", b."Fazenda") AS "Fazenda", + COALESCE(a."Talhao", b."Talhao") AS "Talhao", + COALESCE(a."Participacao", b."Participacao") AS "Participacao", + CASE + WHEN a."Condicao" = 'Disponível Parcial (Moagem)' AND + b."Condicao" = 'Disponível Parcial (Mudas)' THEN + 'Disponível (Safra+Mudas)' + ELSE + COALESCE(a."Condicao", b."Condicao") + END AS "Condicao", + COALESCE(a."Estagio", b."Estagio") AS "Estagio", + COALESCE(a."Variedade", b."Variedade") AS "Variedade", + COALESCE(a."Ciclo Maturacao", b."Ciclo Maturacao") AS "Ciclo Maturacao", + COALESCE(a."Propriedade", b."Propriedade") AS "Propriedade", + COALESCE(a."Proprietario", b."Proprietario") AS "Proprietario", + COALESCE(a."No. Corte", b."No. Corte") AS "No. Corte", + (CASE + WHEN a."Area" IS NULL THEN + 0 + ELSE + a."Area" + END + CASE + WHEN b."Area" IS NULL THEN + 0 + ELSE + b."Area" + END) AS "Area", + CASE + WHEN a."Condicao" = 'Disponível Parcial (Moagem)' AND + b."Condicao" = 'Disponível Parcial (Mudas)' THEN + ((CASE + WHEN a."Area" IS NULL THEN + 0 + ELSE + a."Area" + END + CASE + WHEN b."Area" IS NULL THEN + 0 + ELSE + b."Area" + END) * a."TCH") + ELSE + a."Toneladas" + END AS "Toneladas", + a."TCH", + COALESCE(a."Distancia", b."Distancia") AS "Distancia" + FROM (SELECT --Disponibilidade (Moagem) + A.*, + a."Area" * b."TCH" AS "Toneladas", + b."TCH" AS "TCH", + c."Dist. Terra" + c."Dist. 
Asfalto" AS "Distancia" + FROM ((SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA + a."Fazenda" * 1000 + a."Talhao" AS "Chave", + CASE + WHEN a."Unidade" = 15 THEN + 'USF' + ELSE + 'URD' + END AS "Unidade", + a."Fazenda", + a."Talhao", + a."Participacao", + CASE + WHEN a."Ocorrencia Cadastro" = 'C' THEN + 'Disponível Total (Moagem)' + ELSE + 'Disponível Parcial (Moagem)' + END AS "Condicao", + a."Estagio", + a."Variedade", + a."Ciclo Maturacao", + a."Propriedade", + a."Proprietario", + a."No. Corte", + (a."Area" - (CASE + WHEN b."Area Fechada" IS NULL THEN + 0 + ELSE + b."Area Fechada" + END)) AS "Area" + FROM (SELECT --ULTIMA ESTIMATIVA DO TALHAO A + OBJ.CD_UNID_IND AS "Unidade", + OBJ.CD_UPNIVEL1 AS "Fazenda", + OBJ.CD_UPNIVEL3 AS "Talhao", + OBJ.CD_UPNIVEL1 || ' - ' || F.DE_UPNIVEL1 AS "Propriedade", + G.DE_FORNEC AS "Proprietario", + CASE + WHEN UP3.CD_TP_PROPR IN (1, 2, 3, 11) THEN + 'Parceria' + WHEN UP3.CD_TP_PROPR IN (5, 8) THEN + 'Fornecedor' + WHEN UP3.CD_TP_PROPR = 6 THEN + 'Fornecedor' + WHEN UP3.CD_TP_PROPR = 14 THEN + 'Parceria' + ELSE + 'Verificar' + END AS "Participacao", + C.FG_OCORREN AS "Ocorrencia Cadastro", + C.DT_OCORREN AS "Data Ocorrencia", + B.DA_ESTAGIO AS "Estagio", + B.NO_CORTE AS "No. Corte", + D.DE_VARIED AS "Variedade", + E.DE_MATURAC AS "Ciclo Maturacao", + (OBJ.QT_AREA_PROD * 1) AS "Area", + (OBJ.QT_CANA_ENTR / 1000) AS "Toneladas" + FROM PIMSCS.HISTPREPRO OBJ, + PIMSCS.ESTAGIOS B, + PIMSCS.UPNIVEL3 UP3, + PIMSCS.SAFRUPNIV3 C, + PIMSCS.VARIEDADES D, + PIMSCS.TIPO_MATURAC E, + PIMSCS.UPNIVEL1 F, + PIMSCS.FORNECS G + WHERE OBJ.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND OBJ.CD_UNID_IND IN (15, 19) + AND OBJ.CD_ESTAGIO = B.CD_ESTAGIO + AND OBJ.CD_UPNIVEL1 = UP3.CD_UPNIVEL1 + AND OBJ.CD_UPNIVEL3 = UP3.CD_UPNIVEL3 + AND OBJ.CD_SAFRA = UP3.CD_SAFRA + AND OBJ.CD_UPNIVEL1 = C.CD_UPNIVEL1 + AND OBJ.CD_UPNIVEL3 = C.CD_UPNIVEL3 + AND OBJ.CD_SAFRA = C.CD_SAFRA + AND UP3.CD_VARIED = D.CD_VARIED + AND E.FG_MATURAC = D.FG_MATURAC + AND OBJ.CD_UPNIVEL1 = F.CD_UPNIVEL1 + AND F.CD_FORNEC = G.CD_FORNEC + AND C.DT_OCORREN = + (SELECT MAX(D.DT_OCORREN) + FROM PIMSCS.SAFRUPNIV3 D + WHERE D.CD_UPNIVEL1 = C.CD_UPNIVEL1 + AND D.CD_UPNIVEL3 = C.CD_UPNIVEL3 + AND D.CD_SAFRA = C.CD_SAFRA) + AND OBJ.CD_HIST = + (SELECT OBJ2.CD_HIST + FROM PIMSCS.HISTPREPRO OBJ2 + WHERE OBJ2.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 + AND OBJ2.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 + AND OBJ2.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND OBJ2.CD_HIST NOT IN ('E', 'S') + AND OBJ2.CD_EMPRESA IN (15, 19) + AND OBJ2.DT_HISTORICO = + (SELECT MAX(OBJ3.DT_HISTORICO) + FROM PIMSCS.HISTPREPRO OBJ3 + WHERE OBJ3.CD_UPNIVEL1 = + OBJ.CD_UPNIVEL1 + AND OBJ3.CD_UPNIVEL3 = + OBJ.CD_UPNIVEL3 + AND OBJ3.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND OBJ3.CD_HIST NOT IN ('E', 'S') + AND OBJ3.CD_EMPRESA IN (15, 19)))) A, + (SELECT --ÁREA DE ORDEM DE CORTE DE SAFRA FECHADA B + QD.CD_UPNIVEL1 AS "Fazenda", + QD.CD_UPNIVEL3 AS "Talhao", + SUM(QD.QT_AREA) AS "Area Fechada" + FROM PIMSCS.QUEIMA_HE QH, PIMSCS.QUEIMA_DE QD + WHERE QH.NO_QUEIMA = QD.NO_QUEIMA + AND QD.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + GROUP BY QD.CD_UPNIVEL1, QD.CD_UPNIVEL3) B + WHERE a."Fazenda" = b."Fazenda"(+) + AND a."Talhao" = b."Talhao"(+) + AND a."Ocorrencia Cadastro" <> 'F' + AND (a."Area" - (CASE + WHEN b."Area Fechada" IS NULL THEN + 0 + ELSE + b."Area Fechada" + END)) > 0)) A + LEFT JOIN (SELECT --Ultima Estimativa do Talhão + A.CD_HIST "Cod. 
Historico", + CASE + WHEN A.CD_UNID_IND = 15 THEN + 'USF' + ELSE + 'URD' + END AS "Unidade", + A.CD_UPNIVEL1 AS "Zona", + A.CD_UPNIVEL3 AS "Talhao", + A.DT_HISTORICO AS "Data", + A.QT_AREA_PROD AS "Area", + (A.QT_CANA_ENTR / 1000) AS "Toneladas", + A.QT_TCH AS "TCH" + FROM PIMSCS.HISTPREPRO A + WHERE A.CD_UNID_IND IN (15, 19) + AND A.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND A.CD_HIST NOT IN ('E', 'S') + AND A.QT_AREA_PROD <> 0 + AND A.DT_HISTORICO = + (SELECT MAX(A2.DT_HISTORICO) + FROM PIMSCS.HISTPREPRO A2 + WHERE A.CD_SAFRA = A2.CD_SAFRA + AND A.CD_UPNIVEL2 = A2.CD_UPNIVEL1 + AND A.CD_UPNIVEL3 = A2.CD_UPNIVEL3 + AND A2.CD_HIST NOT IN ('E', 'S'))) B + ON a."Fazenda" = b."Zona" + AND a."Talhao" = b."Talhao" + LEFT JOIN (SELECT --Distancia Cadastrada + A.CD_UPNIVEL1 AS "Zona", + A.CD_UPNIVEL3 AS "Talhao", + MAX(A.DS_TERRA) AS "Dist. Terra", + MAX(A.DS_ASFALTO) AS "Dist. Asfalto" + FROM PIMSCS.UPNIVEL3 A + LEFT JOIN PIMSCS.SAFRUPNIV3 B + ON A.CD_SAFRA = B.CD_SAFRA + AND A.CD_UPNIVEL1 = B.CD_UPNIVEL1 + AND A.CD_UPNIVEL3 = B.CD_UPNIVEL3 + WHERE A.CD_UNID_IND IN (15, 19) + AND A.CD_OCUP = 1 + AND A.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND B.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND B.FG_OCORREN <> 'I' + AND B.DT_OCORREN = + (SELECT MAX(B2.DT_OCORREN) + FROM PIMSCS.SAFRUPNIV3 B2 + WHERE B.CD_SAFRA = B2.CD_SAFRA + AND B.CD_UPNIVEL1 = B2.CD_UPNIVEL1 + AND B.CD_UPNIVEL3 = B2.CD_UPNIVEL3) + GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) C + ON a."Fazenda" = c."Zona" + AND a."Talhao" = c."Talhao") A + FULL JOIN (SELECT --Disponibilidade (Mudas) + A.*, + a."Area" * b."TCH" AS "Toneladas", + b."TCH" AS "TCH", + c."Dist. Terra" + c."Dist. Asfalto" AS "Distancia" + FROM ((SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA + a."Fazenda" * 1000 + a."Talhao" AS "Chave", + CASE + WHEN a."Unidade" = 15 THEN + 'USF' + ELSE + 'URD' + END AS "Unidade", + a."Fazenda", + a."Talhao", + a."Participacao", + CASE + WHEN a."Ocorrencia Cadastro" = 'C' THEN + 'Disponível Total (Mudas)' + ELSE + 'Disponível Parcial (Mudas)' + END AS "Condicao", + a."Estagio", + a."Variedade", + a."Ciclo Maturacao", + a."Propriedade", + a."Proprietario", + a."No. Corte", + (a."Area" - (CASE + WHEN b."Area Fechada" IS NULL THEN + 0 + ELSE + b."Area Fechada" + END)) AS "Area" + FROM (SELECT --ULTIMA ESTIMATIVA DO TALHAO A + OBJ.CD_UNID_IND AS "Unidade", + OBJ.CD_UPNIVEL1 AS "Fazenda", + OBJ.CD_UPNIVEL3 AS "Talhao", + OBJ.CD_UPNIVEL1 || ' - ' || F.DE_UPNIVEL1 AS "Propriedade", + G.DE_FORNEC AS "Proprietario", + CASE + WHEN UP3.CD_TP_PROPR IN (1, 2, 3, 11) THEN + 'Parceria' + WHEN UP3.CD_TP_PROPR IN (5, 8) THEN + 'Fornecedor' + WHEN UP3.CD_TP_PROPR = 6 THEN + 'Fornecedor' + WHEN UP3.CD_TP_PROPR = 14 THEN + 'Parceria' + ELSE + 'Verificar' + END AS "Participacao", + C.FG_OCORREN AS "Ocorrencia Cadastro", + C.DT_OCORREN AS "Data Ocorrencia", + B.DA_ESTAGIO AS "Estagio", + B.NO_CORTE AS "No. 
Corte", + D.DE_VARIED AS "Variedade", + E.DE_MATURAC AS "Ciclo Maturacao", + (OBJ.QT_AREA_PROD * 1) AS "Area", + (OBJ.QT_CANA_ENTR / 1000) AS "Toneladas" + FROM PIMSCS.HISTPREPRO OBJ, + PIMSCS.ESTAGIOS B, + PIMSCS.UPNIVEL3 UP3, + PIMSCS.SAFRUPNIV3 C, + PIMSCS.VARIEDADES D, + PIMSCS.TIPO_MATURAC E, + PIMSCS.UPNIVEL1 F, + PIMSCS.FORNECS G + WHERE OBJ.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND OBJ.CD_UNID_IND IN (15, 19) + AND OBJ.CD_ESTAGIO = B.CD_ESTAGIO + AND OBJ.CD_UPNIVEL1 = UP3.CD_UPNIVEL1 + AND OBJ.CD_UPNIVEL3 = UP3.CD_UPNIVEL3 + AND OBJ.CD_SAFRA = UP3.CD_SAFRA + AND OBJ.CD_UPNIVEL1 = C.CD_UPNIVEL1 + AND OBJ.CD_UPNIVEL3 = C.CD_UPNIVEL3 + AND OBJ.CD_SAFRA = C.CD_SAFRA + AND UP3.CD_VARIED = D.CD_VARIED + AND E.FG_MATURAC = D.FG_MATURAC + AND OBJ.CD_UPNIVEL1 = F.CD_UPNIVEL1 + AND F.CD_FORNEC = G.CD_FORNEC + AND C.DT_OCORREN = + (SELECT MAX(D.DT_OCORREN) + FROM PIMSCS.SAFRUPNIV3 D + WHERE D.CD_UPNIVEL1 = C.CD_UPNIVEL1 + AND D.CD_UPNIVEL3 = C.CD_UPNIVEL3 + AND D.CD_SAFRA = C.CD_SAFRA) + AND OBJ.CD_HIST = + (SELECT OBJ2.CD_HIST + FROM PIMSCS.HISTPREPRO OBJ2 + WHERE OBJ2.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 + AND OBJ2.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 + AND OBJ2.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND OBJ2.CD_HIST = 'S' + AND OBJ2.CD_EMPRESA IN (15, 19) + AND OBJ2.DT_HISTORICO = + (SELECT MAX(OBJ3.DT_HISTORICO) + FROM PIMSCS.HISTPREPRO OBJ3 + WHERE OBJ3.CD_UPNIVEL1 = + OBJ.CD_UPNIVEL1 + AND OBJ3.CD_UPNIVEL3 = + OBJ.CD_UPNIVEL3 + AND OBJ3.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND OBJ3.CD_HIST = 'S' + AND OBJ3.CD_EMPRESA IN (15, 19)))) A, + (SELECT --ÁREA DE ORDEM DE CORTE DE MUDAS FECHADA B + A.CD_UPNIVEL1 AS "Fazenda", + A.CD_UPNIVEL3 AS "Talhao", + SUM(A.QT_AREA) AS "Area Fechada" + FROM PIMSCS.OCORTEMD_DE A + JOIN PIMSCS.OCORTEMD_HE B + ON A.NO_ORDEM = B.NO_ORDEM + WHERE A.CD_SAFRA = + (SELECT MAX(CD_SAFRA) + FROM PIMSCS.HISTPREPRO) + AND B.FG_SITUACAO = 'F' + GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) B + WHERE a."Fazenda" = b."Fazenda"(+) + AND a."Talhao" = b."Talhao"(+) + AND a."Ocorrencia Cadastro" <> 'F' + AND (a."Area" - (CASE + WHEN b."Area Fechada" IS NULL THEN + 0 + ELSE + b."Area Fechada" + END)) > 0)) A + LEFT JOIN (SELECT --Ultima Estimativa do Talhão + A.CD_HIST "Cod. Historico", + CASE + WHEN A.CD_UNID_IND = 15 THEN + 'USF' + ELSE + 'URD' + END AS "Unidade", + A.CD_UPNIVEL1 AS "Zona", + A.CD_UPNIVEL3 AS "Talhao", + A.DT_HISTORICO AS "Data", + A.QT_AREA_PROD AS "Area", + (A.QT_CANA_ENTR / 1000) AS "Toneladas", + A.QT_TCH AS "TCH" + FROM PIMSCS.HISTPREPRO A + WHERE A.CD_UNID_IND IN (15, 19) + AND A.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND A.CD_HIST = 'S' + AND A.QT_AREA_PROD <> 0 + AND A.DT_HISTORICO = + (SELECT MAX(A2.DT_HISTORICO) + FROM PIMSCS.HISTPREPRO A2 + WHERE A.CD_SAFRA = A2.CD_SAFRA + AND A.CD_UPNIVEL2 = A2.CD_UPNIVEL1 + AND A.CD_UPNIVEL3 = A2.CD_UPNIVEL3 + AND A2.CD_HIST = 'S')) B + ON a."Fazenda" = b."Zona" + AND a."Talhao" = b."Talhao" + LEFT JOIN (SELECT --Distancia Cadastrada + A.CD_UPNIVEL1 AS "Zona", + A.CD_UPNIVEL3 AS "Talhao", + MAX(A.DS_TERRA) AS "Dist. Terra", + MAX(A.DS_ASFALTO) AS "Dist. 
Asfalto" + FROM PIMSCS.UPNIVEL3 A + LEFT JOIN PIMSCS.SAFRUPNIV3 B + ON A.CD_SAFRA = B.CD_SAFRA + AND A.CD_UPNIVEL1 = B.CD_UPNIVEL1 + AND A.CD_UPNIVEL3 = B.CD_UPNIVEL3 + WHERE A.CD_UNID_IND IN (15, 19) + AND A.CD_OCUP = 1 + AND A.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND B.CD_SAFRA = + (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) + AND B.FG_OCORREN <> 'I' + AND B.DT_OCORREN = + (SELECT MAX(B2.DT_OCORREN) + FROM PIMSCS.SAFRUPNIV3 B2 + WHERE B.CD_SAFRA = B2.CD_SAFRA + AND B.CD_UPNIVEL1 = B2.CD_UPNIVEL1 + AND B.CD_UPNIVEL3 = B2.CD_UPNIVEL3) + GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) C + ON a."Fazenda" = c."Zona" + AND a."Talhao" = c."Talhao") B + ON a."Chave" = b."Chave" + + """ + print(SQLParseHandler().parse_sql(sql)) + assert SQLParseHandler().parse_sql(sql) == { + "command": "SELECT", + "query_string": 'SELECT --Locais Disponíveis para Abertura de Ordens de corte COALESCE(a."Chave", b."Chave") AS "Chave", COALESCE(a."Unidade", b."Unidade") AS "Unidade", COALESCE(a."Fazenda", b."Fazenda") AS "Fazenda", COALESCE(a."Talhao", b."Talhao") AS "Talhao", COALESCE(a."Participacao", b."Participacao") AS "Participacao", CASE WHEN a."Condicao" = \'Disponível Parcial (Moagem)\' AND b."Condicao" = \'Disponível Parcial (Mudas)\' THEN \'Disponível (Safra+Mudas)\' ELSE COALESCE(a."Condicao", b."Condicao") END AS "Condicao", COALESCE(a."Estagio", b."Estagio") AS "Estagio", COALESCE(a."Variedade", b."Variedade") AS "Variedade", COALESCE(a."Ciclo Maturacao", b."Ciclo Maturacao") AS "Ciclo Maturacao", COALESCE(a."Propriedade", b."Propriedade") AS "Propriedade", COALESCE(a."Proprietario", b."Proprietario") AS "Proprietario", COALESCE(a."No. Corte", b."No. Corte") AS "No. Corte", (CASE WHEN a."Area" IS NULL THEN 0 ELSE a."Area" END + CASE WHEN b."Area" IS NULL THEN 0 ELSE b."Area" END) AS "Area", CASE WHEN a."Condicao" = \'Disponível Parcial (Moagem)\' AND b."Condicao" = \'Disponível Parcial (Mudas)\' THEN ((CASE WHEN a."Area" IS NULL THEN 0 ELSE a."Area" END + CASE WHEN b."Area" IS NULL THEN 0 ELSE b."Area" END) * a."TCH") ELSE a."Toneladas" END AS "Toneladas", a."TCH", COALESCE(a."Distancia", b."Distancia") AS "Distancia" FROM (SELECT --Disponibilidade (Moagem) A.*, a."Area" * b."TCH" AS "Toneladas", b."TCH" AS "TCH", c."Dist. Terra" + c."Dist. Asfalto" AS "Distancia" FROM ((SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA a."Fazenda" * 1000 + a."Talhao" AS "Chave", CASE WHEN a."Unidade" = 15 THEN \'USF\' ELSE \'URD\' END AS "Unidade", a."Fazenda", a."Talhao", a."Participacao", CASE WHEN a."Ocorrencia Cadastro" = \'C\' THEN \'Disponível Total (Moagem)\' ELSE \'Disponível Parcial (Moagem)\' END AS "Condicao", a."Estagio", a."Variedade", a."Ciclo Maturacao", a."Propriedade", a."Proprietario", a."No. Corte", (a."Area" - (CASE WHEN b."Area Fechada" IS NULL THEN 0 ELSE b."Area Fechada" END)) AS "Area" FROM (SELECT --ULTIMA ESTIMATIVA DO TALHAO A OBJ.CD_UNID_IND AS "Unidade", OBJ.CD_UPNIVEL1 AS "Fazenda", OBJ.CD_UPNIVEL3 AS "Talhao", OBJ.CD_UPNIVEL1 || \' - \' || F.DE_UPNIVEL1 AS "Propriedade", G.DE_FORNEC AS "Proprietario", CASE WHEN UP3.CD_TP_PROPR IN (1, 2, 3, 11) THEN \'Parceria\' WHEN UP3.CD_TP_PROPR IN (5, 8) THEN \'Fornecedor\' WHEN UP3.CD_TP_PROPR = 6 THEN \'Fornecedor\' WHEN UP3.CD_TP_PROPR = 14 THEN \'Parceria\' ELSE \'Verificar\' END AS "Participacao", C.FG_OCORREN AS "Ocorrencia Cadastro", C.DT_OCORREN AS "Data Ocorrencia", B.DA_ESTAGIO AS "Estagio", B.NO_CORTE AS "No. 
Corte", D.DE_VARIED AS "Variedade", E.DE_MATURAC AS "Ciclo Maturacao", (OBJ.QT_AREA_PROD * 1) AS "Area", (OBJ.QT_CANA_ENTR / 1000) AS "Toneladas" FROM PIMSCS.HISTPREPRO OBJ, PIMSCS.ESTAGIOS B, PIMSCS.UPNIVEL3 UP3, PIMSCS.SAFRUPNIV3 C, PIMSCS.VARIEDADES D, PIMSCS.TIPO_MATURAC E, PIMSCS.UPNIVEL1 F, PIMSCS.FORNECS G WHERE OBJ.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ.CD_UNID_IND IN (15, 19) AND OBJ.CD_ESTAGIO = B.CD_ESTAGIO AND OBJ.CD_UPNIVEL1 = UP3.CD_UPNIVEL1 AND OBJ.CD_UPNIVEL3 = UP3.CD_UPNIVEL3 AND OBJ.CD_SAFRA = UP3.CD_SAFRA AND OBJ.CD_UPNIVEL1 = C.CD_UPNIVEL1 AND OBJ.CD_UPNIVEL3 = C.CD_UPNIVEL3 AND OBJ.CD_SAFRA = C.CD_SAFRA AND UP3.CD_VARIED = D.CD_VARIED AND E.FG_MATURAC = D.FG_MATURAC AND OBJ.CD_UPNIVEL1 = F.CD_UPNIVEL1 AND F.CD_FORNEC = G.CD_FORNEC AND C.DT_OCORREN = (SELECT MAX(D.DT_OCORREN) FROM PIMSCS.SAFRUPNIV3 D WHERE D.CD_UPNIVEL1 = C.CD_UPNIVEL1 AND D.CD_UPNIVEL3 = C.CD_UPNIVEL3 AND D.CD_SAFRA = C.CD_SAFRA) AND OBJ.CD_HIST = (SELECT OBJ2.CD_HIST FROM PIMSCS.HISTPREPRO OBJ2 WHERE OBJ2.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 AND OBJ2.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 AND OBJ2.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ2.CD_HIST NOT IN (\'E\', \'S\') AND OBJ2.CD_EMPRESA IN (15, 19) AND OBJ2.DT_HISTORICO = (SELECT MAX(OBJ3.DT_HISTORICO) FROM PIMSCS.HISTPREPRO OBJ3 WHERE OBJ3.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 AND OBJ3.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 AND OBJ3.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ3.CD_HIST NOT IN (\'E\', \'S\') AND OBJ3.CD_EMPRESA IN (15, 19)))) A, (SELECT --ÁREA DE ORDEM DE CORTE DE SAFRA FECHADA B QD.CD_UPNIVEL1 AS "Fazenda", QD.CD_UPNIVEL3 AS "Talhao", SUM(QD.QT_AREA) AS "Area Fechada" FROM PIMSCS.QUEIMA_HE QH, PIMSCS.QUEIMA_DE QD WHERE QH.NO_QUEIMA = QD.NO_QUEIMA AND QD.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) GROUP BY QD.CD_UPNIVEL1, QD.CD_UPNIVEL3) B WHERE a."Fazenda" = b."Fazenda"(+) AND a."Talhao" = b."Talhao"(+) AND a."Ocorrencia Cadastro" <> \'F\' AND (a."Area" - (CASE WHEN b."Area Fechada" IS NULL THEN 0 ELSE b."Area Fechada" END)) > 0)) A LEFT JOIN (SELECT --Ultima Estimativa do Talhão A.CD_HIST "Cod. Historico", CASE WHEN A.CD_UNID_IND = 15 THEN \'USF\' ELSE \'URD\' END AS "Unidade", A.CD_UPNIVEL1 AS "Zona", A.CD_UPNIVEL3 AS "Talhao", A.DT_HISTORICO AS "Data", A.QT_AREA_PROD AS "Area", (A.QT_CANA_ENTR / 1000) AS "Toneladas", A.QT_TCH AS "TCH" FROM PIMSCS.HISTPREPRO A WHERE A.CD_UNID_IND IN (15, 19) AND A.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND A.CD_HIST NOT IN (\'E\', \'S\') AND A.QT_AREA_PROD <> 0 AND A.DT_HISTORICO = (SELECT MAX(A2.DT_HISTORICO) FROM PIMSCS.HISTPREPRO A2 WHERE A.CD_SAFRA = A2.CD_SAFRA AND A.CD_UPNIVEL2 = A2.CD_UPNIVEL1 AND A.CD_UPNIVEL3 = A2.CD_UPNIVEL3 AND A2.CD_HIST NOT IN (\'E\', \'S\'))) B ON a."Fazenda" = b."Zona" AND a."Talhao" = b."Talhao" LEFT JOIN (SELECT --Distancia Cadastrada A.CD_UPNIVEL1 AS "Zona", A.CD_UPNIVEL3 AS "Talhao", MAX(A.DS_TERRA) AS "Dist. Terra", MAX(A.DS_ASFALTO) AS "Dist. 
Asfalto" FROM PIMSCS.UPNIVEL3 A LEFT JOIN PIMSCS.SAFRUPNIV3 B ON A.CD_SAFRA = B.CD_SAFRA AND A.CD_UPNIVEL1 = B.CD_UPNIVEL1 AND A.CD_UPNIVEL3 = B.CD_UPNIVEL3 WHERE A.CD_UNID_IND IN (15, 19) AND A.CD_OCUP = 1 AND A.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND B.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND B.FG_OCORREN <> \'I\' AND B.DT_OCORREN = (SELECT MAX(B2.DT_OCORREN) FROM PIMSCS.SAFRUPNIV3 B2 WHERE B.CD_SAFRA = B2.CD_SAFRA AND B.CD_UPNIVEL1 = B2.CD_UPNIVEL1 AND B.CD_UPNIVEL3 = B2.CD_UPNIVEL3) GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) C ON a."Fazenda" = c."Zona" AND a."Talhao" = c."Talhao") A FULL JOIN (SELECT --Disponibilidade (Mudas) A.*, a."Area" * b."TCH" AS "Toneladas", b."TCH" AS "TCH", c."Dist. Terra" + c."Dist. Asfalto" AS "Distancia" FROM ((SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA a."Fazenda" * 1000 + a."Talhao" AS "Chave", CASE WHEN a."Unidade" = 15 THEN \'USF\' ELSE \'URD\' END AS "Unidade", a."Fazenda", a."Talhao", a."Participacao", CASE WHEN a."Ocorrencia Cadastro" = \'C\' THEN \'Disponível Total (Mudas)\' ELSE \'Disponível Parcial (Mudas)\' END AS "Condicao", a."Estagio", a."Variedade", a."Ciclo Maturacao", a."Propriedade", a."Proprietario", a."No. Corte", (a."Area" - (CASE WHEN b."Area Fechada" IS NULL THEN 0 ELSE b."Area Fechada" END)) AS "Area" FROM (SELECT --ULTIMA ESTIMATIVA DO TALHAO A OBJ.CD_UNID_IND AS "Unidade", OBJ.CD_UPNIVEL1 AS "Fazenda", OBJ.CD_UPNIVEL3 AS "Talhao", OBJ.CD_UPNIVEL1 || \' - \' || F.DE_UPNIVEL1 AS "Propriedade", G.DE_FORNEC AS "Proprietario", CASE WHEN UP3.CD_TP_PROPR IN (1, 2, 3, 11) THEN \'Parceria\' WHEN UP3.CD_TP_PROPR IN (5, 8) THEN \'Fornecedor\' WHEN UP3.CD_TP_PROPR = 6 THEN \'Fornecedor\' WHEN UP3.CD_TP_PROPR = 14 THEN \'Parceria\' ELSE \'Verificar\' END AS "Participacao", C.FG_OCORREN AS "Ocorrencia Cadastro", C.DT_OCORREN AS "Data Ocorrencia", B.DA_ESTAGIO AS "Estagio", B.NO_CORTE AS "No. 
Corte", D.DE_VARIED AS "Variedade", E.DE_MATURAC AS "Ciclo Maturacao", (OBJ.QT_AREA_PROD * 1) AS "Area", (OBJ.QT_CANA_ENTR / 1000) AS "Toneladas" FROM PIMSCS.HISTPREPRO OBJ, PIMSCS.ESTAGIOS B, PIMSCS.UPNIVEL3 UP3, PIMSCS.SAFRUPNIV3 C, PIMSCS.VARIEDADES D, PIMSCS.TIPO_MATURAC E, PIMSCS.UPNIVEL1 F, PIMSCS.FORNECS G WHERE OBJ.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ.CD_UNID_IND IN (15, 19) AND OBJ.CD_ESTAGIO = B.CD_ESTAGIO AND OBJ.CD_UPNIVEL1 = UP3.CD_UPNIVEL1 AND OBJ.CD_UPNIVEL3 = UP3.CD_UPNIVEL3 AND OBJ.CD_SAFRA = UP3.CD_SAFRA AND OBJ.CD_UPNIVEL1 = C.CD_UPNIVEL1 AND OBJ.CD_UPNIVEL3 = C.CD_UPNIVEL3 AND OBJ.CD_SAFRA = C.CD_SAFRA AND UP3.CD_VARIED = D.CD_VARIED AND E.FG_MATURAC = D.FG_MATURAC AND OBJ.CD_UPNIVEL1 = F.CD_UPNIVEL1 AND F.CD_FORNEC = G.CD_FORNEC AND C.DT_OCORREN = (SELECT MAX(D.DT_OCORREN) FROM PIMSCS.SAFRUPNIV3 D WHERE D.CD_UPNIVEL1 = C.CD_UPNIVEL1 AND D.CD_UPNIVEL3 = C.CD_UPNIVEL3 AND D.CD_SAFRA = C.CD_SAFRA) AND OBJ.CD_HIST = (SELECT OBJ2.CD_HIST FROM PIMSCS.HISTPREPRO OBJ2 WHERE OBJ2.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 AND OBJ2.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 AND OBJ2.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ2.CD_HIST = \'S\' AND OBJ2.CD_EMPRESA IN (15, 19) AND OBJ2.DT_HISTORICO = (SELECT MAX(OBJ3.DT_HISTORICO) FROM PIMSCS.HISTPREPRO OBJ3 WHERE OBJ3.CD_UPNIVEL1 = OBJ.CD_UPNIVEL1 AND OBJ3.CD_UPNIVEL3 = OBJ.CD_UPNIVEL3 AND OBJ3.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND OBJ3.CD_HIST = \'S\' AND OBJ3.CD_EMPRESA IN (15, 19)))) A, (SELECT --ÁREA DE ORDEM DE CORTE DE MUDAS FECHADA B A.CD_UPNIVEL1 AS "Fazenda", A.CD_UPNIVEL3 AS "Talhao", SUM(A.QT_AREA) AS "Area Fechada" FROM PIMSCS.OCORTEMD_DE A JOIN PIMSCS.OCORTEMD_HE B ON A.NO_ORDEM = B.NO_ORDEM WHERE A.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND B.FG_SITUACAO = \'F\' GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) B WHERE a."Fazenda" = b."Fazenda"(+) AND a."Talhao" = b."Talhao"(+) AND a."Ocorrencia Cadastro" <> \'F\' AND (a."Area" - (CASE WHEN b."Area Fechada" IS NULL THEN 0 ELSE b."Area Fechada" END)) > 0)) A LEFT JOIN (SELECT --Ultima Estimativa do Talhão A.CD_HIST "Cod. Historico", CASE WHEN A.CD_UNID_IND = 15 THEN \'USF\' ELSE \'URD\' END AS "Unidade", A.CD_UPNIVEL1 AS "Zona", A.CD_UPNIVEL3 AS "Talhao", A.DT_HISTORICO AS "Data", A.QT_AREA_PROD AS "Area", (A.QT_CANA_ENTR / 1000) AS "Toneladas", A.QT_TCH AS "TCH" FROM PIMSCS.HISTPREPRO A WHERE A.CD_UNID_IND IN (15, 19) AND A.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND A.CD_HIST = \'S\' AND A.QT_AREA_PROD <> 0 AND A.DT_HISTORICO = (SELECT MAX(A2.DT_HISTORICO) FROM PIMSCS.HISTPREPRO A2 WHERE A.CD_SAFRA = A2.CD_SAFRA AND A.CD_UPNIVEL2 = A2.CD_UPNIVEL1 AND A.CD_UPNIVEL3 = A2.CD_UPNIVEL3 AND A2.CD_HIST = \'S\')) B ON a."Fazenda" = b."Zona" AND a."Talhao" = b."Talhao" LEFT JOIN (SELECT --Distancia Cadastrada A.CD_UPNIVEL1 AS "Zona", A.CD_UPNIVEL3 AS "Talhao", MAX(A.DS_TERRA) AS "Dist. Terra", MAX(A.DS_ASFALTO) AS "Dist. 
Asfalto" FROM PIMSCS.UPNIVEL3 A LEFT JOIN PIMSCS.SAFRUPNIV3 B ON A.CD_SAFRA = B.CD_SAFRA AND A.CD_UPNIVEL1 = B.CD_UPNIVEL1 AND A.CD_UPNIVEL3 = B.CD_UPNIVEL3 WHERE A.CD_UNID_IND IN (15, 19) AND A.CD_OCUP = 1 AND A.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND B.CD_SAFRA = (SELECT MAX(CD_SAFRA) FROM PIMSCS.HISTPREPRO) AND B.FG_OCORREN <> \'I\' AND B.DT_OCORREN = (SELECT MAX(B2.DT_OCORREN) FROM PIMSCS.SAFRUPNIV3 B2 WHERE B.CD_SAFRA = B2.CD_SAFRA AND B.CD_UPNIVEL1 = B2.CD_UPNIVEL1 AND B.CD_UPNIVEL3 = B2.CD_UPNIVEL3) GROUP BY A.CD_UPNIVEL1, A.CD_UPNIVEL3) C ON a."Fazenda" = c."Zona" AND a."Talhao" = c."Talhao") B ON a."Chave" = b."Chave"', # noqa + "query_digest_text": "SELECT --Locais Disponíveis para Abertura de Ordens de corte COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , CASE WHEN a . '?' = '?' AND b . '?' = '?' THEN '?' ELSE COALESCE ( a . '?' , b . '?' ) END AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , COALESCE ( a . '?' , b . '?' ) AS '?' , ( CASE WHEN a . '?' IS NULL THEN ? ELSE a . '?' END + CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) AS '?' , CASE WHEN a . '?' = '?' AND b . '?' = '?' THEN ( ( CASE WHEN a . '?' IS NULL THEN ? ELSE a . '?' END + CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) * a . '?' ) ELSE a . '?' END AS '?' , a . '?' , COALESCE ( a . '?' , b . '?' ) AS '?' FROM ( SELECT --Disponibilidade (Moagem) A . * , a . '?' * b . '?' AS '?' , b . '?' AS '?' , c . '?' + c . '?' AS '?' FROM ( ( SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA a . '?' * ? + a . '?' AS '?' , CASE WHEN a . '?' = ? THEN '?' ELSE '?' END AS '?' , a . '?' , a . '?' , a . '?' , CASE WHEN a . '?' = '?' THEN '?' ELSE '?' END AS '?' , a . '?' , a . '?' , a . '?' , a . '?' , a . '?' , a . '?' , ( a . '?' - ( CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) ) AS '?' FROM ( SELECT --ULTIMA ESTIMATIVA DO TALHAO A OBJ . CD_UNID_IND AS '?' , OBJ . CD_UPNIVEL1 AS '?' , OBJ . CD_UPNIVEL3 AS '?' , OBJ . CD_UPNIVEL1 || '?' || F . DE_UPNIVEL1 AS '?' , G . DE_FORNEC AS '?' , CASE WHEN UP3 . CD_TP_PROPR IN ( ? , ? , ? , ? ) THEN '?' WHEN UP3 . CD_TP_PROPR IN ( ? , ? ) THEN '?' WHEN UP3 . CD_TP_PROPR = ? THEN '?' WHEN UP3 . CD_TP_PROPR = ? THEN '?' ELSE '?' END AS '?' , C . FG_OCORREN AS '?' , C . DT_OCORREN AS '?' , B . DA_ESTAGIO AS '?' , B . NO_CORTE AS '?' , D . DE_VARIED AS '?' , E . DE_MATURAC AS '?' , ( OBJ . QT_AREA_PROD * ? ) AS '?' , ( OBJ . QT_CANA_ENTR / ? ) AS '?' FROM PIMSCS . HISTPREPRO OBJ , PIMSCS . ESTAGIOS B , PIMSCS . UPNIVEL3 UP3 , PIMSCS . SAFRUPNIV3 C , PIMSCS . VARIEDADES D , PIMSCS . TIPO_MATURAC E , PIMSCS . UPNIVEL1 F , PIMSCS . FORNECS G WHERE OBJ . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ . CD_UNID_IND IN ( ? , ? ) AND OBJ . CD_ESTAGIO = B . CD_ESTAGIO AND OBJ . CD_UPNIVEL1 = UP3 . CD_UPNIVEL1 AND OBJ . CD_UPNIVEL3 = UP3 . CD_UPNIVEL3 AND OBJ . CD_SAFRA = UP3 . CD_SAFRA AND OBJ . CD_UPNIVEL1 = C . CD_UPNIVEL1 AND OBJ . CD_UPNIVEL3 = C . CD_UPNIVEL3 AND OBJ . CD_SAFRA = C . CD_SAFRA AND UP3 . CD_VARIED = D . CD_VARIED AND E . FG_MATURAC = D . FG_MATURAC AND OBJ . CD_UPNIVEL1 = F . CD_UPNIVEL1 AND F . CD_FORNEC = G . CD_FORNEC AND C . DT_OCORREN = ( SELECT MAX ( D . DT_OCORREN ) FROM PIMSCS . 
SAFRUPNIV3 D WHERE D . CD_UPNIVEL1 = C . CD_UPNIVEL1 AND D . CD_UPNIVEL3 = C . CD_UPNIVEL3 AND D . CD_SAFRA = C . CD_SAFRA ) AND OBJ . CD_HIST = ( SELECT OBJ2 . CD_HIST FROM PIMSCS . HISTPREPRO OBJ2 WHERE OBJ2 . CD_UPNIVEL1 = OBJ . CD_UPNIVEL1 AND OBJ2 . CD_UPNIVEL3 = OBJ . CD_UPNIVEL3 AND OBJ2 . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ2 . CD_HIST NOT IN ( '?' , '?' ) AND OBJ2 . CD_EMPRESA IN ( ? , ? ) AND OBJ2 . DT_HISTORICO = ( SELECT MAX ( OBJ3 . DT_HISTORICO ) FROM PIMSCS . HISTPREPRO OBJ3 WHERE OBJ3 . CD_UPNIVEL1 = OBJ . CD_UPNIVEL1 AND OBJ3 . CD_UPNIVEL3 = OBJ . CD_UPNIVEL3 AND OBJ3 . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ3 . CD_HIST NOT IN ( '?' , '?' ) AND OBJ3 . CD_EMPRESA IN ( ? , ? ) ) ) ) A , ( SELECT --ÁREA DE ORDEM DE CORTE DE SAFRA FECHADA B QD . CD_UPNIVEL1 AS '?' , QD . CD_UPNIVEL3 AS '?' , SUM ( QD . QT_AREA ) AS '?' FROM PIMSCS . QUEIMA_HE QH , PIMSCS . QUEIMA_DE QD WHERE QH . NO_QUEIMA = QD . NO_QUEIMA AND QD . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) GROUP BY QD . CD_UPNIVEL1 , QD . CD_UPNIVEL3 ) B WHERE a . '?' = b . '?' ( + ) AND a . '?' = b . '?' ( + ) AND a . '?' <> '?' AND ( a . '?' - ( CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) ) > ? ) ) A LEFT JOIN ( SELECT --Ultima Estimativa do Talhão A . CD_HIST '?' , CASE WHEN A . CD_UNID_IND = ? THEN '?' ELSE '?' END AS '?' , A . CD_UPNIVEL1 AS '?' , A . CD_UPNIVEL3 AS '?' , A . DT_HISTORICO AS '?' , A . QT_AREA_PROD AS '?' , ( A . QT_CANA_ENTR / ? ) AS '?' , A . QT_TCH AS '?' FROM PIMSCS . HISTPREPRO A WHERE A . CD_UNID_IND IN ( ? , ? ) AND A . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND A . CD_HIST NOT IN ( '?' , '?' ) AND A . QT_AREA_PROD <> ? AND A . DT_HISTORICO = ( SELECT MAX ( A2 . DT_HISTORICO ) FROM PIMSCS . HISTPREPRO A2 WHERE A . CD_SAFRA = A2 . CD_SAFRA AND A . CD_UPNIVEL2 = A2 . CD_UPNIVEL1 AND A . CD_UPNIVEL3 = A2 . CD_UPNIVEL3 AND A2 . CD_HIST NOT IN ( '?' , '?' ) ) ) B ON a . '?' = b . '?' AND a . '?' = b . '?' LEFT JOIN ( SELECT --Distancia Cadastrada A . CD_UPNIVEL1 AS '?' , A . CD_UPNIVEL3 AS '?' , MAX ( A . DS_TERRA ) AS '?' , MAX ( A . DS_ASFALTO ) AS '?' FROM PIMSCS . UPNIVEL3 A LEFT JOIN PIMSCS . SAFRUPNIV3 B ON A . CD_SAFRA = B . CD_SAFRA AND A . CD_UPNIVEL1 = B . CD_UPNIVEL1 AND A . CD_UPNIVEL3 = B . CD_UPNIVEL3 WHERE A . CD_UNID_IND IN ( ? , ? ) AND A . CD_OCUP = ? AND A . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND B . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND B . FG_OCORREN <> '?' AND B . DT_OCORREN = ( SELECT MAX ( B2 . DT_OCORREN ) FROM PIMSCS . SAFRUPNIV3 B2 WHERE B . CD_SAFRA = B2 . CD_SAFRA AND B . CD_UPNIVEL1 = B2 . CD_UPNIVEL1 AND B . CD_UPNIVEL3 = B2 . CD_UPNIVEL3 ) GROUP BY A . CD_UPNIVEL1 , A . CD_UPNIVEL3 ) C ON a . '?' = c . '?' AND a . '?' = c . '?' ) A FULL JOIN ( SELECT --Disponibilidade (Mudas) A . * , a . '?' * b . '?' AS '?' , b . '?' AS '?' , c . '?' + c . '?' AS '?' FROM ( ( SELECT --ÁREAS DISPONÍVEIS PARA ABERTURA DE ORDEM CORTE DE SAFRA a . '?' * ? + a . '?' AS '?' , CASE WHEN a . '?' = ? THEN '?' ELSE '?' END AS '?' , a . '?' , a . '?' , a . '?' , CASE WHEN a . '?' = '?' THEN '?' ELSE '?' END AS '?' , a . '?' , a . '?' , a . '?' , a . '?' , a . '?' , a . '?' , ( a . '?' - ( CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) ) AS '?' FROM ( SELECT --ULTIMA ESTIMATIVA DO TALHAO A OBJ . CD_UNID_IND AS '?' , OBJ . CD_UPNIVEL1 AS '?' , OBJ . CD_UPNIVEL3 AS '?' , OBJ . CD_UPNIVEL1 || '?' || F . 
DE_UPNIVEL1 AS '?' , G . DE_FORNEC AS '?' , CASE WHEN UP3 . CD_TP_PROPR IN ( ? , ? , ? , ? ) THEN '?' WHEN UP3 . CD_TP_PROPR IN ( ? , ? ) THEN '?' WHEN UP3 . CD_TP_PROPR = ? THEN '?' WHEN UP3 . CD_TP_PROPR = ? THEN '?' ELSE '?' END AS '?' , C . FG_OCORREN AS '?' , C . DT_OCORREN AS '?' , B . DA_ESTAGIO AS '?' , B . NO_CORTE AS '?' , D . DE_VARIED AS '?' , E . DE_MATURAC AS '?' , ( OBJ . QT_AREA_PROD * ? ) AS '?' , ( OBJ . QT_CANA_ENTR / ? ) AS '?' FROM PIMSCS . HISTPREPRO OBJ , PIMSCS . ESTAGIOS B , PIMSCS . UPNIVEL3 UP3 , PIMSCS . SAFRUPNIV3 C , PIMSCS . VARIEDADES D , PIMSCS . TIPO_MATURAC E , PIMSCS . UPNIVEL1 F , PIMSCS . FORNECS G WHERE OBJ . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ . CD_UNID_IND IN ( ? , ? ) AND OBJ . CD_ESTAGIO = B . CD_ESTAGIO AND OBJ . CD_UPNIVEL1 = UP3 . CD_UPNIVEL1 AND OBJ . CD_UPNIVEL3 = UP3 . CD_UPNIVEL3 AND OBJ . CD_SAFRA = UP3 . CD_SAFRA AND OBJ . CD_UPNIVEL1 = C . CD_UPNIVEL1 AND OBJ . CD_UPNIVEL3 = C . CD_UPNIVEL3 AND OBJ . CD_SAFRA = C . CD_SAFRA AND UP3 . CD_VARIED = D . CD_VARIED AND E . FG_MATURAC = D . FG_MATURAC AND OBJ . CD_UPNIVEL1 = F . CD_UPNIVEL1 AND F . CD_FORNEC = G . CD_FORNEC AND C . DT_OCORREN = ( SELECT MAX ( D . DT_OCORREN ) FROM PIMSCS . SAFRUPNIV3 D WHERE D . CD_UPNIVEL1 = C . CD_UPNIVEL1 AND D . CD_UPNIVEL3 = C . CD_UPNIVEL3 AND D . CD_SAFRA = C . CD_SAFRA ) AND OBJ . CD_HIST = ( SELECT OBJ2 . CD_HIST FROM PIMSCS . HISTPREPRO OBJ2 WHERE OBJ2 . CD_UPNIVEL1 = OBJ . CD_UPNIVEL1 AND OBJ2 . CD_UPNIVEL3 = OBJ . CD_UPNIVEL3 AND OBJ2 . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ2 . CD_HIST = '?' AND OBJ2 . CD_EMPRESA IN ( ? , ? ) AND OBJ2 . DT_HISTORICO = ( SELECT MAX ( OBJ3 . DT_HISTORICO ) FROM PIMSCS . HISTPREPRO OBJ3 WHERE OBJ3 . CD_UPNIVEL1 = OBJ . CD_UPNIVEL1 AND OBJ3 . CD_UPNIVEL3 = OBJ . CD_UPNIVEL3 AND OBJ3 . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND OBJ3 . CD_HIST = '?' AND OBJ3 . CD_EMPRESA IN ( ? , ? ) ) ) ) A , ( SELECT --ÁREA DE ORDEM DE CORTE DE MUDAS FECHADA B A . CD_UPNIVEL1 AS '?' , A . CD_UPNIVEL3 AS '?' , SUM ( A . QT_AREA ) AS '?' FROM PIMSCS . OCORTEMD_DE A JOIN PIMSCS . OCORTEMD_HE B ON A . NO_ORDEM = B . NO_ORDEM WHERE A . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND B . FG_SITUACAO = '?' GROUP BY A . CD_UPNIVEL1 , A . CD_UPNIVEL3 ) B WHERE a . '?' = b . '?' ( + ) AND a . '?' = b . '?' ( + ) AND a . '?' <> '?' AND ( a . '?' - ( CASE WHEN b . '?' IS NULL THEN ? ELSE b . '?' END ) ) > ? ) ) A LEFT JOIN ( SELECT --Ultima Estimativa do Talhão A . CD_HIST '?' , CASE WHEN A . CD_UNID_IND = ? THEN '?' ELSE '?' END AS '?' , A . CD_UPNIVEL1 AS '?' , A . CD_UPNIVEL3 AS '?' , A . DT_HISTORICO AS '?' , A . QT_AREA_PROD AS '?' , ( A . QT_CANA_ENTR / ? ) AS '?' , A . QT_TCH AS '?' FROM PIMSCS . HISTPREPRO A WHERE A . CD_UNID_IND IN ( ? , ? ) AND A . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND A . CD_HIST = '?' AND A . QT_AREA_PROD <> ? AND A . DT_HISTORICO = ( SELECT MAX ( A2 . DT_HISTORICO ) FROM PIMSCS . HISTPREPRO A2 WHERE A . CD_SAFRA = A2 . CD_SAFRA AND A . CD_UPNIVEL2 = A2 . CD_UPNIVEL1 AND A . CD_UPNIVEL3 = A2 . CD_UPNIVEL3 AND A2 . CD_HIST = '?' ) ) B ON a . '?' = b . '?' AND a . '?' = b . '?' LEFT JOIN ( SELECT --Distancia Cadastrada A . CD_UPNIVEL1 AS '?' , A . CD_UPNIVEL3 AS '?' , MAX ( A . DS_TERRA ) AS '?' , MAX ( A . DS_ASFALTO ) AS '?' FROM PIMSCS . UPNIVEL3 A LEFT JOIN PIMSCS . SAFRUPNIV3 B ON A . CD_SAFRA = B . CD_SAFRA AND A . CD_UPNIVEL1 = B . CD_UPNIVEL1 AND A . CD_UPNIVEL3 = B . 
CD_UPNIVEL3 WHERE A . CD_UNID_IND IN ( ? , ? ) AND A . CD_OCUP = ? AND A . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND B . CD_SAFRA = ( SELECT MAX ( CD_SAFRA ) FROM PIMSCS . HISTPREPRO ) AND B . FG_OCORREN <> '?' AND B . DT_OCORREN = ( SELECT MAX ( B2 . DT_OCORREN ) FROM PIMSCS . SAFRUPNIV3 B2 WHERE B . CD_SAFRA = B2 . CD_SAFRA AND B . CD_UPNIVEL1 = B2 . CD_UPNIVEL1 AND B . CD_UPNIVEL3 = B2 . CD_UPNIVEL3 ) GROUP BY A . CD_UPNIVEL1 , A . CD_UPNIVEL3 ) C ON a . '?' = c . '?' AND a . '?' = c . '?' ) B ON a . '?' = b . '?'", # noqa + "query_digest_md5": "d128d643be71089fbe56ac23ed28f44f", + "table_name": "PIMSCS.ESTAGIOS,PIMSCS.FORNECS,PIMSCS.HISTPREPRO,PIMSCS.OCORTEMD_DE,PIMSCS.OCORTEMD_HE,PIMSCS.QUEIMA_DE,PIMSCS.QUEIMA_HE,PIMSCS.SAFRUPNIV3,PIMSCS.TIPO_MATURAC,PIMSCS.UPNIVEL1,PIMSCS.UPNIVEL3,PIMSCS.VARIEDADES", # noqa + "query_length": 11057, + } diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py b/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py index 5f1b95e7fe..0a4c0b4ee2 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_ha_destroy.py @@ -62,7 +62,7 @@ def cluster_dumper_destroy(self): def patch_ticket_detail(self): # TODO: 集群下架流程,暂时不需要联动dumper下架,后续看体验再加上 # self.ticket.update_details(dumper_destroy_infos=self.cluster_dumper_destroy()) - pass + super().patch_ticket_detail() def custom_ticket_flows(self): # 下架流程 diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py index 414382b7c3..a08819637b 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_migrate_cluster.py @@ -32,7 +32,7 @@ class MigrateClusterInfoSerializer(serializers.Serializer): ip_source = serializers.ChoiceField( help_text=_("机器来源"), choices=IpSource.get_choices(), required=False, default=IpSource.MANUAL_INPUT ) - infos = serializers.ListField(help_text=_("克隆主从信息"), child=MigrateClusterInfoSerializer()) + infos = serializers.ListField(help_text=_("迁移主从信息"), child=MigrateClusterInfoSerializer()) backup_source = serializers.ChoiceField( help_text=_("备份源"), choices=MySQLBackupSource.get_choices(), default=MySQLBackupSource.REMOTE ) @@ -87,6 +87,6 @@ def post_callback(self): class MysqlMigrateClusterFlowBuilder(BaseMySQLTicketFlowBuilder): serializer = MysqlMigrateClusterDetailSerializer inner_flow_builder = MysqlMigrateClusterParamBuilder - inner_flow_name = _("克隆主从执行") + inner_flow_name = _("迁移主从执行") resource_batch_apply_builder = MysqlMigrateClusterResourceParamBuilder retry_type = FlowRetryType.MANUAL_RETRY diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_partition.py b/dbm-ui/backend/ticket/builders/mysql/mysql_partition.py index 1ef53733df..353027e865 100644 --- a/dbm-ui/backend/ticket/builders/mysql/mysql_partition.py +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_partition.py @@ -12,6 +12,7 @@ from django.utils.translation import gettext_lazy as _ from rest_framework import serializers +from backend.db_meta.models import AppCache from backend.flow.engine.controller.mysql import MySQLController from backend.ticket import builders from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder, MySQLBaseOperateDetailSerializer @@ -55,7 +56,8 @@ class MySQLPartitionParamBuilder(builders.FlowParamBuilder): controller = MySQLController.mysql_partition def format_ticket_data(self): - pass + app = 
AppCache.objects.get(bk_biz_id=self.ticket_data["bk_biz_id"]) + self.ticket_data.update(bk_biz_name=app.bk_biz_name, db_app_abbr=app.db_app_abbr) @builders.BuilderFactory.register(TicketType.MYSQL_PARTITION) diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_single_clear.py b/dbm-ui/backend/ticket/builders/mysql/mysql_single_clear.py new file mode 100644 index 0000000000..7bb9c4af47 --- /dev/null +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_single_clear.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" + +from django.utils.translation import ugettext_lazy as _ + +from backend.flow.engine.controller.mysql import MySQLController +from backend.ticket import builders +from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder +from backend.ticket.builders.mysql.mysql_ha_clear import MySQLHaClearDetailSerializer, MySQLHaClearFlowParamBuilder +from backend.ticket.constants import TicketType + + +class MySQLSingleClearDetailSerializer(MySQLHaClearDetailSerializer): + pass + + +class MySQLSingleClearFlowParamBuilder(MySQLHaClearFlowParamBuilder): + """MySQL清档执行单据参数""" + + controller = MySQLController.mysql_single_truncate_data_scene + + def format_ticket_data(self): + pass + + +@builders.BuilderFactory.register(TicketType.MYSQL_SINGLE_TRUNCATE_DATA) +class MySQLSingleClearFlowBuilder(BaseMySQLTicketFlowBuilder): + serializer = MySQLSingleClearDetailSerializer + inner_flow_builder = MySQLSingleClearFlowParamBuilder + inner_flow_name = _("MySQL 单节点清档执行") diff --git a/dbm-ui/backend/ticket/builders/mysql/mysql_single_rename.py b/dbm-ui/backend/ticket/builders/mysql/mysql_single_rename.py new file mode 100644 index 0000000000..34b805164b --- /dev/null +++ b/dbm-ui/backend/ticket/builders/mysql/mysql_single_rename.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" + +from django.utils.translation import ugettext_lazy as _ + +from backend.flow.engine.controller.mysql import MySQLController +from backend.ticket import builders +from backend.ticket.builders.mysql.base import BaseMySQLTicketFlowBuilder +from backend.ticket.builders.mysql.mysql_ha_rename import MySQLHaRenameFlowParamBuilder, MySQLHaRenameSerializer +from backend.ticket.constants import TicketType + + +class MySQLSingleRenameDetailSerializer(MySQLHaRenameSerializer): + pass + + +class MySQLSingleRenameFlowParamBuilder(MySQLHaRenameFlowParamBuilder): + controller = MySQLController.mysql_single_rename_database_scene + + +@builders.BuilderFactory.register(TicketType.MYSQL_SINGLE_RENAME_DATABASE) +class MySQLSingleClearFlowBuilder(BaseMySQLTicketFlowBuilder): + serializer = MySQLSingleRenameDetailSerializer + inner_flow_builder = MySQLSingleRenameFlowParamBuilder + inner_flow_name = _("MySQL 单节点DB重命名执行") diff --git a/dbm-ui/backend/ticket/builders/redis/redis_cluster_version_update.py b/dbm-ui/backend/ticket/builders/redis/redis_cluster_version_update.py new file mode 100644 index 0000000000..d929902aec --- /dev/null +++ b/dbm-ui/backend/ticket/builders/redis/redis_cluster_version_update.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +from django.utils.translation import ugettext_lazy as _ +from rest_framework import serializers + +from backend.db_meta.enums.comm import RedisVerUpdateNodeType +from backend.flow.engine.controller.redis import RedisController +from backend.ticket import builders +from backend.ticket.builders.redis.base import BaseRedisTicketFlowBuilder +from backend.ticket.constants import TicketType + + +class RedisVersionUpdateDetailSerializer(serializers.Serializer): + class UpdateInfoSerializer(serializers.Serializer): + cluster_id = serializers.IntegerField(help_text=_("集群ID")) + node_type = serializers.ChoiceField(help_text=_("节点类型"), choices=RedisVerUpdateNodeType.get_choices()) + current_versions = serializers.ListField(help_text=_("当前版本列表"), child=serializers.CharField()) + target_version = serializers.CharField(help_text=_("目标版本")) + + infos = serializers.ListField(help_text=_("版本升级信息"), child=UpdateInfoSerializer()) + + def to_representation(self, details): + return details + + +class RedisVersionUpdateFlowParamBuilder(builders.FlowParamBuilder): + controller = RedisController.redis_cluster_version_update_online + + def format_ticket_data(self): + pass + + +@builders.BuilderFactory.register(TicketType.REDIS_CLUSTER_VERSION_UPDATE_ONLINE) +class RedisVersionUpdateFlowBuilder(BaseRedisTicketFlowBuilder): + serializer = RedisVersionUpdateDetailSerializer + inner_flow_builder = RedisVersionUpdateFlowParamBuilder + inner_flow_name = _("redis 集群版本升级") diff --git a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition.py b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition.py index f252eca105..ce50eff76e 100644 --- a/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition.py +++ b/dbm-ui/backend/ticket/builders/tendbcluster/tendb_partition.py @@ -13,7 +13,7 @@ from backend.flow.engine.controller.spider import SpiderController from backend.ticket import builders -from backend.ticket.builders.mysql.mysql_partition import MySQLPartitionDetailSerializer +from backend.ticket.builders.mysql.mysql_partition import MySQLPartitionDetailSerializer, MySQLPartitionParamBuilder from backend.ticket.builders.tendbcluster.base import BaseTendbTicketFlowBuilder, TendbBaseOperateDetailSerializer from backend.ticket.constants import TicketType @@ -22,11 +22,11 @@ class SpiderPartitionDetailSerializer(MySQLPartitionDetailSerializer, TendbBaseO pass -class SpiderPartitionParamBuilder(builders.FlowParamBuilder): +class SpiderPartitionParamBuilder(MySQLPartitionParamBuilder): controller = SpiderController.spider_partition def format_ticket_data(self): - pass + super().format_ticket_data() @builders.BuilderFactory.register(TicketType.TENDBCLUSTER_PARTITION) diff --git a/dbm-ui/backend/ticket/constants.py b/dbm-ui/backend/ticket/constants.py index 29300ece7a..91e48989a6 100644 --- a/dbm-ui/backend/ticket/constants.py +++ b/dbm-ui/backend/ticket/constants.py @@ -156,7 +156,7 @@ def get_db_type_by_ticket(cls, ticket_type): MYSQL_ADD_SLAVE = TicketEnumField("MYSQL_ADD_SLAVE", _("MySQL 添加从库"), _("集群维护")) MYSQL_RESTORE_SLAVE = TicketEnumField("MYSQL_RESTORE_SLAVE", _("MySQL Slave重建"), _("集群维护")) MYSQL_RESTORE_LOCAL_SLAVE = TicketEnumField("MYSQL_RESTORE_LOCAL_SLAVE", _("MySQL Slave原地重建"), _("集群维护")) - MYSQL_MIGRATE_CLUSTER = TicketEnumField("MYSQL_MIGRATE_CLUSTER", _("MySQL 克隆主从"), _("集群维护")) + MYSQL_MIGRATE_CLUSTER = TicketEnumField("MYSQL_MIGRATE_CLUSTER", _("MySQL 迁移主从"), _("集群维护")) MYSQL_MASTER_SLAVE_SWITCH = TicketEnumField("MYSQL_MASTER_SLAVE_SWITCH", _("MySQL 主从互换"), _("集群维护")) 
MYSQL_MASTER_FAIL_OVER = TicketEnumField("MYSQL_MASTER_FAIL_OVER", _("MySQL 主库故障切换"), _("集群维护")) MYSQL_HA_APPLY = TicketEnumField("MYSQL_HA_APPLY", _("MySQL 高可用部署"), register_iam=False) diff --git a/dbm-ui/config/default.py b/dbm-ui/config/default.py index 42501841e5..d1c6f0d5f3 100644 --- a/dbm-ui/config/default.py +++ b/dbm-ui/config/default.py @@ -129,7 +129,7 @@ # 跨域中间件 "corsheaders.middleware.CorsMiddleware", # 接口耗时调试工具 - # "pyinstrument.middleware.ProfilerMiddleware", + "pyinstrument.middleware.ProfilerMiddleware", # JWT认证,透传的应用信息,透传的用户信息 "apigw_manager.apigw.authentication.ApiGatewayJWTGenericMiddleware", "apigw_manager.apigw.authentication.ApiGatewayJWTAppMiddleware", diff --git a/dbm-ui/frontend/src/common/const.ts b/dbm-ui/frontend/src/common/const.ts index 65c85fca73..78b3681bae 100644 --- a/dbm-ui/frontend/src/common/const.ts +++ b/dbm-ui/frontend/src/common/const.ts @@ -167,8 +167,10 @@ export enum TicketTypes { MYSQL_CLIENT_CLONE_RULES = 'MYSQL_CLIENT_CLONE_RULES', MYSQL_RESTORE_LOCAL_SLAVE = 'MYSQL_RESTORE_LOCAL_SLAVE', MYSQL_HA_RENAME_DATABASE = 'MYSQL_HA_RENAME_DATABASE', + MYSQL_SINGLE_RENAME_DATABASE = 'MYSQL_SINGLE_RENAME_DATABASE', MYSQL_ADD_SLAVE = 'MYSQL_ADD_SLAVE', MYSQL_HA_TRUNCATE_DATA = 'MYSQL_HA_TRUNCATE_DATA', + MYSQL_SINGLE_TRUNCATE_DATA = 'MYSQL_SINGLE_TRUNCATE_DATA', MYSQL_CHECKSUM = 'MYSQL_CHECKSUM', TENDBCLUSTER_APPLY = 'TENDBCLUSTER_APPLY', REDIS_CLUSTER_APPLY = 'REDIS_CLUSTER_APPLY', @@ -251,6 +253,7 @@ export enum TicketTypes { REDIS_CLUSTER_TYPE_UPDATE = 'REDIS_CLUSTER_TYPE_UPDATE', // redis 集群类型变更 REDIS_DATACOPY_CHECK_REPAIR = 'REDIS_DATACOPY_CHECK_REPAIR', // redis 数据校验与修复 REDIS_CLUSTER_ROLLBACK_DATA_COPY = 'REDIS_CLUSTER_ROLLBACK_DATA_COPY', // redis 数据回写 + REDIS_CLUSTER_VERSION_UPDATE_ONLINE = 'REDIS_CLUSTER_VERSION_UPDATE_ONLINE', // redis 版本升级 TENDBCLUSTER_DISABLE = 'TENDBCLUSTER_DISABLE', // spider 集群禁用 TENDBCLUSTER_ENABLE = 'TENDBCLUSTER_ENABLE', // spider 集群启用 TENDBCLUSTER_DESTROY = 'TENDBCLUSTER_DESTROY', // spider 集群下架 diff --git a/dbm-ui/frontend/src/components/cluster-selector/Index.vue b/dbm-ui/frontend/src/components/cluster-selector/Index.vue index 2457b78be4..a0ebb2351a 100644 --- a/dbm-ui/frontend/src/components/cluster-selector/Index.vue +++ b/dbm-ui/frontend/src/components/cluster-selector/Index.vue @@ -64,7 +64,7 @@ v-for="tabItem of tabList" :key="tabItem.id" ref="tabTipsRef" - disabled + :disabled="!onlyOneType" theme="light">
{ + Object.assign(results, { + [id]: {}, + }); + return results; + }, {} as Record>); + } }; /** diff --git a/dbm-ui/frontend/src/components/system-search/Index.vue b/dbm-ui/frontend/src/components/system-search/Index.vue index fcdf7901f3..0ad63ed423 100644 --- a/dbm-ui/frontend/src/components/system-search/Index.vue +++ b/dbm-ui/frontend/src/components/system-search/Index.vue @@ -1,3 +1,16 @@ + +