From 5ee5ee218817df493946463fa16ebc008434ab59 Mon Sep 17 00:00:00 2001 From: xfan0805 Date: Wed, 6 Sep 2023 21:01:34 +0800 Subject: [PATCH] =?UTF-8?q?feat:=20=E6=96=B0=E5=A2=9Emysql=E5=BC=80?= =?UTF-8?q?=E5=8C=BA=E5=8A=9F=E8=83=BD=20#1036?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../mysql/db-partition/handler/handler.go | 3 +- .../service/execute_partition_object.go | 16 + .../db-partition/service/manage_config.go | 62 ++-- .../mysql/db-tools/dbactuator/.gitignore | 5 +- .../internal/subcmd/mysqlcmd/mysqlcmd.go | 4 + .../subcmd/mysqlcmd/open_area_dump_data.go | 96 +++++ .../subcmd/mysqlcmd/open_area_dump_schema.go | 98 +++++ .../subcmd/mysqlcmd/open_area_import_data.go | 96 +++++ .../mysqlcmd/open_area_import_schema.go | 103 ++++++ .../components/mysql/open_area_dump_schema.go | 335 +++++++++++++++++ .../mysql/open_area_import_schema.go | 278 ++++++++++++++ .../backend/db_meta/models/mysql_open_area.py | 57 +++ dbm-ui/backend/flow/consts.py | 4 + .../scene/mysql/mysql_open_area_flow.py | 349 ++++++++++++++++++ .../backend/flow/engine/controller/mysql.py | 5 + dbm-ui/backend/flow/urls.py | 2 + .../flow/utils/mysql/mysql_act_playload.py | 137 +++++++ dbm-ui/backend/flow/views/mysql_open_area.py | 31 ++ 18 files changed, 1657 insertions(+), 24 deletions(-) create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_data.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_schema.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_data.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_schema.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go create mode 100644 dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go create mode 100644 
dbm-ui/backend/db_meta/models/mysql_open_area.py create mode 100644 dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py create mode 100644 dbm-ui/backend/flow/views/mysql_open_area.py diff --git a/dbm-services/mysql/db-partition/handler/handler.go b/dbm-services/mysql/db-partition/handler/handler.go index debfe62ca6..6aebb5f7b4 100644 --- a/dbm-services/mysql/db-partition/handler/handler.go +++ b/dbm-services/mysql/db-partition/handler/handler.go @@ -2,7 +2,6 @@ package handler import ( - "dbm-services/mysql/db-partition/cron" "errors" "fmt" "net/http" @@ -11,6 +10,8 @@ import ( "strings" "time" + "dbm-services/mysql/db-partition/cron" + cron_pkg "github.com/robfig/cron/v3" "dbm-services/common/go-pubpkg/errno" diff --git a/dbm-services/mysql/db-partition/service/execute_partition_object.go b/dbm-services/mysql/db-partition/service/execute_partition_object.go index dd481b5b8c..9a3458b3c9 100644 --- a/dbm-services/mysql/db-partition/service/execute_partition_object.go +++ b/dbm-services/mysql/db-partition/service/execute_partition_object.go @@ -1,3 +1,4 @@ +// Package service TODO package service import ( @@ -117,3 +118,18 @@ type PartitionObject struct { ShardName string `json:"shard_name"` ExecuteObjects []PartitionSql `json:"execute_objects"` } + +// PartitionLogs TODO +type PartitionLogs struct { + ID int `gorm:"column:id;primary_key;auto_increment` + BkBizId int `gorm:"column:bk_biz_id"` + Operator string `gorm:"column:operator"` + Para string `gorm:"column:para"` + ExecuteTime time.Time `gorm:"column:execute_time"` +} + +// PartitionLogsParam TODO +type PartitionLogsParam struct { + Para string + Err error +} diff --git a/dbm-services/mysql/db-partition/service/manage_config.go b/dbm-services/mysql/db-partition/service/manage_config.go index e41f9badff..55551b83e5 100644 --- a/dbm-services/mysql/db-partition/service/manage_config.go +++ b/dbm-services/mysql/db-partition/service/manage_config.go @@ -11,6 +11,7 @@ package service import ( + 
"encoding/json" "errors" "fmt" "regexp" @@ -305,6 +306,7 @@ func (m *CreatePartitionsInput) UpdatePartitionsConfig() error { reservedPartition := m.ExpireTime / m.PartitionTimeInterval partitionType := 0 + switch m.PartitionColumnType { case "datetime": partitionType = 0 @@ -318,37 +320,53 @@ func (m *CreatePartitionsInput) UpdatePartitionsConfig() error { var errs []string for _, dblike := range m.DbLikes { for _, tblike := range m.TbLikes { - updateCondition := fmt.Sprintf("bk_biz_id=%d and immute_domain='%s' and dblike='%s' and tblike='%s'", - m.BkBizId, m.ImmuteDomain, dblike, tblike) - var updateColumn struct { - PartitionColumn string - PartitionColumnType string - ReservedPartition int - ExtraPartition int - PartitionTimeInterval int - PartitionType int - ExpireTime int - Creator string - Updator string + update_column_map := map[string]interface{}{ + "partition_column": m.PartitionColumn, + "partition_column_type": m.PartitionColumnType, + "reserved_partition": reservedPartition, + "extra_partition": extraTime, + "partition_time_interval": m.PartitionTimeInterval, + "partition_type": partitionType, + "expire_time": m.ExpireTime, + "updator": m.Updator, + "update_time": time.Now(), + } + result := model.DB.Self.Debug().Table(tbName). + Where( + "bk_biz_id=? and immute_domain=? and dblike=? and tblike=?", + m.BkBizId, m.ImmuteDomain, dblike, tblike). 
+ Updates(update_column_map) + var para PartitionLogsParam + jString, jerr := json.Marshal(update_column_map) + if jerr != nil { + return jerr } - updateColumn.PartitionColumn = m.PartitionColumn - updateColumn.PartitionColumnType = m.PartitionColumnType - updateColumn.ReservedPartition = reservedPartition - updateColumn.ExtraPartition = extraTime - updateColumn.PartitionTimeInterval = m.PartitionTimeInterval - updateColumn.PartitionType = partitionType - updateColumn.ExpireTime = m.ExpireTime - updateColumn.Creator = m.Creator - updateColumn.Updator = m.Updator - result := model.DB.Self.Debug().Table(tbName).Where(updateCondition).Updates(&updateColumn) + para.Para = string(jString) if result.Error != nil { errs = append(errs, result.Error.Error()) + para.Err = result.Error + } + var plog PartitionLogs + plog.BkBizId = m.BkBizId + plog.ExecuteTime = time.Now() + plog.Operator = m.Updator + + jString, jerr = json.Marshal(update_column_map) + if jerr != nil { + return jerr + } + plog.Para = string(jString) + res := model.DB.Self.Debug().Create(&plog) + if res.Error != nil { + return res.Error } } } + if len(errs) > 0 { return fmt.Errorf("errors: %s", strings.Join(errs, "\n")) } + return nil } diff --git a/dbm-services/mysql/db-tools/dbactuator/.gitignore b/dbm-services/mysql/db-tools/dbactuator/.gitignore index 01b81f4b16..3810e2c293 100644 --- a/dbm-services/mysql/db-tools/dbactuator/.gitignore +++ b/dbm-services/mysql/db-tools/dbactuator/.gitignore @@ -25,4 +25,7 @@ sync_test.sh .vscode/ scripts/upload_media.sh scripts/upload.sh -localtest/ \ No newline at end of file +localtest/ +.codecc +.idea +.vscode \ No newline at end of file diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go index 114dc98975..b7a5215f6e 100644 --- a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go +++ 
b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/mysqlcmd.go @@ -58,6 +58,10 @@ func NewMysqlCommand() *cobra.Command { NewDropTableCommand(), InstallBackupClientCommand(), NewEnableTokudbPluginCommand(), + NewOpenAreaDumpSchemaCommand(), + NewOpenAreaImportSchemaCommand(), + NewOpenAreaDumpData(), + NewOpenAreaImportData(), }, }, { diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_data.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_data.go new file mode 100644 index 0000000000..ae6688b208 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_data.go @@ -0,0 +1,96 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// OpenAreaDumpDataAct TODO +type OpenAreaDumpDataAct struct { + *subcmd.BaseOptions + Service mysql.OpenAreaDumpSchemaComp +} + +// NewOpenAreaDumpData TODO +func NewOpenAreaDumpData() *cobra.Command { + act := OpenAreaDumpDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "open_area_dumpdata", + Short: "开区导出数据", + Example: fmt.Sprintf( + `dbactuator mysql open_area_dumpdata %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *OpenAreaDumpDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *OpenAreaDumpDataAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + // d.Deserialize方法执行后,并直接返回值, + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return nil +} + +// Run TODO +func (d *OpenAreaDumpDataAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "运行导出指定表数据", + Func: d.Service.OpenAreaDumpData, + }, + { + FunName: "压缩开区文件", + Func: d.Service.CompressDumpDir, + }, + { + FunName: "上传指定表数据文件", + Func: d.Service.Upload, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("开区导出表数据成功") + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_schema.go 
b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_schema.go new file mode 100644 index 0000000000..327cf53c89 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_dump_schema.go @@ -0,0 +1,98 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// OpenAreaDumpSchemaAct TODO +type OpenAreaDumpSchemaAct struct { + *subcmd.BaseOptions + Service mysql.OpenAreaDumpSchemaComp +} + +// NewOpenAreaDumpSchemaCommand TODO +func NewOpenAreaDumpSchemaCommand() *cobra.Command { + // *subcmd.BaseOptions是指针变量,需要初始化, subcmd.GBaseOptions在subcmd的init中已被初始化 + act := OpenAreaDumpSchemaAct{ + BaseOptions: subcmd.GBaseOptions, + } + + cmd := &cobra.Command{ + Use: "open_area_dumpschema", + Short: "开区导出表结构", + Example: fmt.Sprintf( + `dbactuator mysql open_area_dumpschema %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + 
util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *OpenAreaDumpSchemaAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *OpenAreaDumpSchemaAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + // d.Deserialize方法执行后,并直接返回值, + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return nil +} + +// Run TODO +func (d *OpenAreaDumpSchemaAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "运行导出表结构", + Func: d.Service.OpenAreaDumpSchema, + }, + { + FunName: "压缩开区文件", + Func: d.Service.CompressDumpDir, + }, + { + FunName: "上传表结构", + Func: d.Service.Upload, + }, + } + if err := steps.Run(); err != nil { + return err + } + logger.Info("开区导出表结构成功") + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_data.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_data.go new file mode 100644 index 0000000000..9bc4d81c5b --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_data.go @@ -0,0 +1,96 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations under the License. + */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// OpenAreaImportDataAct TODO +type OpenAreaImportDataAct struct { + *subcmd.BaseOptions + Service mysql.OpenAreaImportSchemaComp +} + +// NewOpenAreaImportData TODO +func NewOpenAreaImportData() *cobra.Command { + act := OpenAreaImportDataAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "open_area_importdata", + Short: "开区导入数据", + Example: fmt.Sprintf( + `dbactuator mysql open_area_importschema %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *OpenAreaImportDataAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *OpenAreaImportDataAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Info("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return +} + +// Run TODO +func (d *OpenAreaImportDataAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "解压data文件", + Func: d.Service.DecompressDumpDir, + }, + { + FunName: "导入数据文件", + Func: d.Service.OpenAreaImportData, + }, + { + FunName: "清除dump目录", + Func: d.Service.CleanDumpDir, + }, + } + + if err := steps.Run(); err != nil { + return err + } + logger.Info("开区导入数据成功") + return +} diff --git 
a/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_schema.go b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_schema.go new file mode 100644 index 0000000000..2c110a23b7 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/internal/subcmd/mysqlcmd/open_area_import_schema.go @@ -0,0 +1,103 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package mysqlcmd + +import ( + "fmt" + + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/internal/subcmd" + "dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql" + "dbm-services/mysql/db-tools/dbactuator/pkg/util" + + "github.com/spf13/cobra" +) + +// OpenAreaImportSchemaAct TODO +type OpenAreaImportSchemaAct struct { + *subcmd.BaseOptions + Service mysql.OpenAreaImportSchemaComp +} + +// NewOpenAreaImportSchemaCommand TODO +func NewOpenAreaImportSchemaCommand() *cobra.Command { + act := OpenAreaImportSchemaAct{ + BaseOptions: subcmd.GBaseOptions, + } + cmd := &cobra.Command{ + Use: "open_area_importschema", + Short: "开区导入表结构", + Example: fmt.Sprintf( + `dbactuator mysql open_area_importschema %s %s`, + subcmd.CmdBaseExampleStr, subcmd.ToPrettyJson(act.Service.Example()), + ), + Run: func(cmd *cobra.Command, args []string) { + util.CheckErr(act.Validate()) + util.CheckErr(act.Init()) + util.CheckErr(act.Run()) + }, + } + return cmd +} + +// Validate TODO +func (d *OpenAreaImportSchemaAct) Validate() (err error) { + return d.BaseOptions.Validate() +} + +// Init TODO +func (d *OpenAreaImportSchemaAct) Init() (err error) { + if err = d.Deserialize(&d.Service.Params); err != nil { + logger.Error("DeserializeAndValidate failed, %v", err) + return err + } + d.Service.GeneralParam = subcmd.GeneralRuntimeParam + return +} + +// Run TODO +func (d *OpenAreaImportSchemaAct) Run() (err error) { + steps := subcmd.Steps{ + { + FunName: "init", + Func: d.Service.Init, + }, + { + FunName: "precheck", + Func: d.Service.Precheck, + }, + { + FunName: "解压schema文件", + Func: d.Service.DecompressDumpDir, + }, + { + FunName: "抹除AutoIncrement", + Func: d.Service.EraseAutoIncrement, + }, + { + FunName: "创建新库", + Func: d.Service.CreateNewDatabase, + }, + { + FunName: "导入表结构文件", + Func: d.Service.OpenAreaImportSchema, + }, + { + FunName: "清除dump目录", + Func: d.Service.CleanDumpDir, + }, + } + if err := steps.Run(); err != nil { + return err + } 
+ logger.Info("开区导入表结构成功") + return +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go new file mode 100644 index 0000000000..56b9041b40 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_dump_schema.go @@ -0,0 +1,335 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. + * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. 
+ */ + +package mysql + +import ( + "encoding/json" + "fmt" + "net/url" + "os" + "path" + "reflect" + "strings" + + "dbm-services/common/go-pubpkg/bkrepo" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" +) + +// OpenAreaDumpSchemaComp TODO +type OpenAreaDumpSchemaComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params OpenAreaDumpSchemaParam `json:"extend"` + OpenAreaDumpSchemaRunTimeCtx `json:"-"` +} + +// OpenAreaDumpSchemaParam TODO +type OpenAreaDumpSchemaParam struct { + Host string `json:"host" validate:"required,ip"` // 当前实例的主机地址 + Port int `json:"port" validate:"required,lt=65536,gte=3306"` // 当前实例的端口 + CharSet string `json:"charset" validate:"required,checkCharset"` // 字符集参数 传default过来,按照源数据库的字符集 + RootId string `json:"root_id"` + BkCloudId int `json:"bk_cloud_id"` + DBCloudToken string `json:"db_cloud_token"` + DumpDirName string `json:"dump_dir_name"` // dump目录名称 {}_schema {}_data + FileServer FileServer `json:"fileserver"` + OpenAreaParam []OneOpenAreaSchema `json:"open_area_param"` +} + +// OneOpenAreaSchema 用于存放一个区库表的信息 +type OneOpenAreaSchema struct { + Schema string `json:"schema"` // 指定dump的库 + Tbales []string `json:"tables"` +} + +// OpenAreaDumpSchemaRunTimeCtx TODO +type OpenAreaDumpSchemaRunTimeCtx struct { + charset string // 当前实例的字符集 + dumpCmd string // 中控位置不一样 + isSpider bool // 是否spider中控 + dumpDirPath string // dump目录绝对路径 + tarName string // 压缩文件名称 {}.tar.gz + workDir string // schema目录所在的位置 即位于/data/install/mysql_open_area + uploadFile []UploadFile +} + +// UploadFile TODO +type UploadFile struct { + FilePath string // 上传文件的绝对路径 + FileName string // 上传文件的名称 +} + +// Example TODO +func (c *OpenAreaDumpSchemaComp) Example() interface{} { + 
comp := OpenAreaDumpSchemaComp{ + Params: OpenAreaDumpSchemaParam{ + Host: "0.0.0.0", + Port: 3306, + CharSet: "default", + RootId: "xxxxxxx", + OpenAreaParam: []OneOpenAreaSchema{ + { + Schema: "data1", + Tbales: []string{"tb1", "tb2"}, + }, + { + Schema: "data2", + Tbales: []string{"tb1", "tb2"}, + }, + }, + }, + } + return comp +} + +// Init TODO +func (c *OpenAreaDumpSchemaComp) Init() (err error) { + // 连接实例,查询版本和字符集 同时根据版本确认是否为中控 + conn, err := native.InsObject{ + Host: c.Params.Host, + Port: c.Params.Port, + User: c.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", c.Params.Port, err.Error()) + return err + } + // 获取版本,下面通过版本判断是否是中控节点 + version, err := conn.SelectVersion() + if err != nil { + logger.Error("获取version failed %s", err.Error()) + return err + } + c.isSpider = strings.Contains(version, "tdbctl") + c.charset = c.Params.CharSet + if c.Params.CharSet == "default" { + if c.charset, err = conn.ShowServerCharset(); err != nil { + logger.Error("获取实例的字符集失败:%s", err.Error()) + return err + } + } + c.workDir = path.Join(cst.BK_PKG_INSTALL_PATH, "mysql_open_area") + c.dumpDirPath = path.Join(c.workDir, c.Params.DumpDirName) + c.tarName = fmt.Sprintf("%s.tar.gz", c.Params.DumpDirName) + err = os.MkdirAll(c.dumpDirPath, 0755) + if err != nil { + logger.Error("开区目录创建失败!%s", err.Error()) + return err + } + + return nil +} + +// Precheck TODO +func (c *OpenAreaDumpSchemaComp) Precheck() (err error) { + c.dumpCmd = path.Join(cst.MysqldInstallPath, "bin", "mysqldump") + if c.isSpider { + c.dumpCmd = path.Join(cst.TdbctlInstallPath, "bin", "mysqldump") + } + if !osutil.FileExist(c.dumpCmd) { + return fmt.Errorf("dumpCmd: %s文件不存在", c.dumpCmd) + } + return +} + +// OpenAreaDumpSchema TODO +func (c *OpenAreaDumpSchemaComp) OpenAreaDumpSchema() (err error) { + + for _, oneOpenAreaSchema := range c.Params.OpenAreaParam { + var dumper mysqlutil.Dumper + 
outputfileName := fmt.Sprintf("%s.sql", oneOpenAreaSchema.Schema) + schema := fmt.Sprintf("%s %s", + oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tbales, " "), + ) + + dumper = &mysqlutil.MySQLDumperTogether{ + MySQLDumper: mysqlutil.MySQLDumper{ + DumpDir: c.dumpDirPath, + Ip: c.Params.Host, + Port: c.Params.Port, + DbBackupUser: c.GeneralParam.RuntimeAccountParam.AdminUser, + DbBackupPwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + DbNames: []string{schema}, + DumpCmdFile: c.dumpCmd, + Charset: c.charset, + MySQLDumpOption: mysqlutil.MySQLDumpOption{ + NoData: true, + AddDropTable: false, + DumpRoutine: true, + DumpTrigger: false, + GtidPurgedOff: true, + }, + }, + OutputfileName: outputfileName, + } + if err := dumper.Dump(); err != nil { + logger.Error("dump failed: ", err.Error()) + return err + } + } + + return nil +} + +// OpenAreaDumpData TODO +func (c *OpenAreaDumpSchemaComp) OpenAreaDumpData() (err error) { + + for _, oneOpenAreaSchema := range c.Params.OpenAreaParam { + var dumper mysqlutil.Dumper + if len(oneOpenAreaSchema.Tbales) == 0 { + + continue + } + outputfileName := fmt.Sprintf("%s.sql", oneOpenAreaSchema.Schema) + schema := fmt.Sprintf("%s %s", + oneOpenAreaSchema.Schema, strings.Join(oneOpenAreaSchema.Tbales, " "), + ) + + dumper = &mysqlutil.MySQLDumperTogether{ + MySQLDumper: mysqlutil.MySQLDumper{ + DumpDir: c.dumpDirPath, + Ip: c.Params.Host, + Port: c.Params.Port, + DbBackupUser: c.GeneralParam.RuntimeAccountParam.AdminUser, + DbBackupPwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + DbNames: []string{schema}, + DumpCmdFile: c.dumpCmd, + Charset: c.charset, + MySQLDumpOption: mysqlutil.MySQLDumpOption{ + NoData: false, + AddDropTable: false, + NeedUseDb: false, + NoCreateTb: true, + DumpRoutine: false, + }, + }, + OutputfileName: outputfileName, + } + if err := dumper.Dump(); err != nil { + logger.Error("dump failed: ", err.Error()) + return err + } + } + + return nil +} + +// CompressDumpDir TODO +func (c 
*OpenAreaDumpSchemaComp) CompressDumpDir() (err error) { + // // 如果不上传制品库,则不用压缩 + // if reflect.DeepEqual(c.Params.FileServer, FileServer{}) { + // logger.Info("the fileserver parameter is empty no upload is required ~") + // return nil + // } + // tarPath是开区目录压缩文件的绝对路径 + tarPath := path.Join(c.workDir, c.tarName) + schemaInfo := UploadFile{ + FilePath: tarPath, + FileName: c.tarName, + } + c.uploadFile = append(c.uploadFile, schemaInfo) + tarCmd := fmt.Sprintf("tar -zcf %s -C %s %s", tarPath, c.workDir, c.Params.DumpDirName) + output, err := osutil.ExecShellCommand(false, tarCmd) + if err != nil { + logger.Error("execute(%s) get an error:%s,%s", tarCmd, output, err.Error()) + return err + } + + // 获取压缩文件的MD5 + md5Val, err := osutil.GetFileMd5(tarPath) + if err != nil { + logger.Error("Failed to obtain the MD5 value of the file!Error:%s", err.Error()) + return err + } + md5FileName := fmt.Sprintf("%s.md5sum", c.Params.DumpDirName) + md5FilePath := path.Join(c.workDir, md5FileName) + md5Info := UploadFile{ + FilePath: md5FilePath, + FileName: md5FileName, + } + c.uploadFile = append(c.uploadFile, md5Info) + md5File, err := os.Create(md5FilePath) + if err != nil { + logger.Error("create file(%s) get an error:%s", md5FileName, err.Error()) + return err + } + defer md5File.Close() + _, err = md5File.WriteString(md5Val) + if err != nil { + logger.Error("Write md5 value(%s) to file(%s) error: %s", md5Val, md5FileName, err.Error()) + return err + } + return nil +} + +// Upload TODO +func (c *OpenAreaDumpSchemaComp) Upload() (err error) { + // 这里不传FileServer相关内容,则不会上传到制品库 + if reflect.DeepEqual(c.Params.FileServer, FileServer{}) { + logger.Info("the fileserver parameter is empty no upload is required ~") + return nil + } + + for _, uf := range c.uploadFile { + r := path.Join("generic", c.Params.FileServer.Project, c.Params.FileServer.Bucket, c.Params.FileServer.UploadPath) + uploadUrl, err := url.JoinPath(c.Params.FileServer.URL, r, "/") + if err != nil { + 
logger.Error("call url joinPath failed %s ", err.Error()) + return err + } + if c.Params.BkCloudId == 0 { + // 此处设置上传的路径,注意最后是待上传文件名,不是文件路径 + uploadUrl, err = url.JoinPath( + c.Params.FileServer.URL, path.Join( + "/generic", c.Params.FileServer.Project, + c.Params.FileServer.Bucket, c.Params.FileServer.UploadPath, uf.FileName, + ), + ) + if err != nil { + logger.Error("call url joinPath failed %s ", err.Error()) + return err + } + } + logger.Info("bk_cloud_id:%d,upload url:%s", c.Params.BkCloudId, uploadUrl) + resp, err := bkrepo.UploadFile( + uf.FilePath, uploadUrl, c.Params.FileServer.Username, c.Params.FileServer.Password, + c.Params.BkCloudId, c.Params.DBCloudToken, + ) + if err != nil { + logger.Error("upload sqlfile error %s", err.Error()) + return err + } + if resp.Code != 0 { + errMsg := fmt.Sprintf( + "upload respone code is %d,respone msg:%s,traceId:%s", + resp.Code, + resp.Message, + resp.RequestId, + ) + logger.Error(errMsg) + return fmt.Errorf(errMsg) + } + logger.Info("Resp: code:%d,msg:%s,traceid:%s", resp.Code, resp.Message, resp.RequestId) + var uploadRespdata bkrepo.UploadRespData + if err := json.Unmarshal(resp.Data, &uploadRespdata); err != nil { + logger.Error("unmarshal upload respone data failed %s", err.Error()) + return err + } + logger.Info("%v", uploadRespdata) + } + + return nil +} diff --git a/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go new file mode 100644 index 0000000000..17a8cb8ed7 --- /dev/null +++ b/dbm-services/mysql/db-tools/dbactuator/pkg/components/mysql/open_area_import_schema.go @@ -0,0 +1,278 @@ +/* + * TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. + * Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. 
+ * Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at https://opensource.org/licenses/MIT + * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on + * an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the + * specific language governing permissions and limitations under the License. + */ + +package mysql + +import ( + "fmt" + "os" + "path" + "regexp" + + "dbm-services/bigdata/db-tools/dbactuator/pkg/util" + "dbm-services/common/go-pubpkg/logger" + "dbm-services/mysql/db-tools/dbactuator/pkg/components" + "dbm-services/mysql/db-tools/dbactuator/pkg/core/cst" + "dbm-services/mysql/db-tools/dbactuator/pkg/native" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/mysqlutil" + "dbm-services/mysql/db-tools/dbactuator/pkg/util/osutil" + + "github.com/pkg/errors" +) + +// OpenAreaImportSchemaComp TODO +type OpenAreaImportSchemaComp struct { + GeneralParam *components.GeneralParam `json:"general"` + Params OpenAreaImportSchemaParam `json:"extend"` + OpenAreaImportSchemaRunTimeCtx `json:"-"` +} + +// OpenAreaImportSchemaParam TODO +type OpenAreaImportSchemaParam struct { + Host string `json:"host" validate:"required,ip"` + Port int `json:"port" validate:"required,lt=65536,gte=3306"` + CharSet string `json:"charSet" validate:"required,checkCharset"` + RootId string `json:"root_id"` + BkCloudId int `json:"bk_cloud_id"` + DumpDirName string `json:"dump_dir_name"` // dump目录名称 {}_schema {}_data + DBCloudToken string `json:"db_cloud_token"` + OpenAreaParam []OneOpenAreaImportSchema `json:"open_area_param"` +} + +// OneOpenAreaImportSchema TODO +type OneOpenAreaImportSchema struct { + Schema string `json:"schema"` // 指定dump的库 + NewDB string `json:"newdb"` +} + +// OpenAreaImportSchemaRunTimeCtx TODO +type OpenAreaImportSchemaRunTimeCtx struct { + charset 
string // 当前实例的字符集 + workDir string + tarFilePath string + md5FilePath string + dumpDir string + conn *native.DbWorker + socket string +} + +// Example TODO +func (c *OpenAreaImportSchemaComp) Example() interface{} { + comp := OpenAreaImportSchemaComp{ + Params: OpenAreaImportSchemaParam{ + Host: "0.0.0.0", + Port: 3306, + CharSet: "default", + RootId: "xxxxxxx", + OpenAreaParam: []OneOpenAreaImportSchema{ + { + Schema: "data1", + NewDB: "data1-1001", + }, + { + Schema: "data2", + NewDB: "data2-1001", + }, + }, + }, + } + return comp +} + +// Init TODO +func (c *OpenAreaImportSchemaComp) Init() (err error) { + // 连接实例,确认字符集 + c.conn, err = native.InsObject{ + Host: c.Params.Host, + Port: c.Params.Port, + User: c.GeneralParam.RuntimeAccountParam.AdminUser, + Pwd: c.GeneralParam.RuntimeAccountParam.AdminPwd, + }.Conn() + if err != nil { + logger.Error("Connect %d failed:%s", c.Params.Port, err.Error()) + return err + } + c.charset = c.Params.CharSet + if c.Params.CharSet == "default" { + if c.charset, err = c.conn.ShowServerCharset(); err != nil { + logger.Error("获取实例的字符集失败:%s", err.Error()) + return err + } + } + c.socket, err = c.conn.ShowSocket() + if err != nil { + logger.Error("get socket failed!error:", err.Error()) + return err + } + c.workDir = path.Join(cst.BK_PKG_INSTALL_PATH, "mysql_open_area") + // 绝对路径 + tarFileName := fmt.Sprintf("%s.tar.gz", c.Params.DumpDirName) + c.tarFilePath = path.Join(c.workDir, tarFileName) + md5FileName := fmt.Sprintf("%s.md5sum", c.Params.DumpDirName) + c.md5FilePath = path.Join(c.workDir, md5FileName) + c.dumpDir = path.Join(c.workDir, c.Params.DumpDirName) + return +} + +// Precheck TODO +func (c *OpenAreaImportSchemaComp) Precheck() (err error) { + if !util.FileExists(c.tarFilePath) { + logger.Error("tar file(*s) does not exist.", c.tarFilePath) + return errors.New("压缩文件不存在") + } + if !util.FileExists(c.md5FilePath) { + logger.Error("tar file(*s) does not exist.", c.tarFilePath) + return errors.New("md5sum文件不存在") + } + 
return +} + +// DecompressDumpDir TODO +func (c *OpenAreaImportSchemaComp) DecompressDumpDir() (err error) { + md5Byte, err := os.ReadFile(c.md5FilePath) + if err != nil { + logger.Error("read md5sum(%s) file got an error:%s", c.md5FilePath, err.Error()) + return err + } + realMd5sumVal, err := osutil.GetFileMd5(c.tarFilePath) + if err != nil { + logger.Error("get real md5sum value failed!") + return err + } + sourceMd5sumVal := string(md5Byte) + if sourceMd5sumVal != realMd5sumVal { + msg := fmt.Sprintf("realMD5Sum(%s) is not equal to md5sum(%s) recored in the file(%s)", + realMd5sumVal, sourceMd5sumVal, c.md5FilePath) + logger.Error(msg) + return errors.New(msg) + } + logger.Info("get tar file sucess!") + decopressCmd := fmt.Sprintf("tar -zxf %s -C %s", c.tarFilePath, c.workDir) + output, err := osutil.ExecShellCommand(false, decopressCmd) + if err != nil { + logger.Error("execute(%s) get an error:%s,%s", decopressCmd, output, err.Error()) + return err + } + + return +} + +// EraseAutoIncrement TODO +func (c *OpenAreaImportSchemaComp) EraseAutoIncrement() (err error) { + for _, oneSchemaInfo := range c.Params.OpenAreaParam { + schemaFilePath := fmt.Sprintf("%s/%s.sql", c.dumpDir, oneSchemaInfo.Schema) + schemaContent, err := os.ReadFile(schemaFilePath) + if err != nil { + logger.Error("read file(%s) got an error:%s", schemaFilePath, err.Error()) + return err + } + reg, err := regexp.Compile(`(?i)AUTO_INCREMENT=(\d+)`) + if err != nil { + logger.Error("regexp.Compile failed:%s", err.Error()) + return err + } + reg2, err := regexp.Compile(`(?i)SET tc_admin=0`) + if err != nil { + logger.Error("regexp.Compile failed:%s", err.Error()) + return err + } + reg3, err := regexp.Compile(`\/\*!(.*?)\*\/;`) + if err != nil { + logger.Error("regexp.Compile failed:%s", err.Error()) + return err + } + newSchemaContent := reg.ReplaceAllString(string(schemaContent), "") + newSchemaContent = reg2.ReplaceAllString(newSchemaContent, "SET tc_admin=1") + newSchemaContent = 
reg3.ReplaceAllString(newSchemaContent, "")
+		newSchemaFilePath := fmt.Sprintf("%s.new", schemaFilePath)
+
+		f, err := os.Create(newSchemaFilePath)
+		if err != nil {
+			logger.Error("create file(%s) error:%s", newSchemaFilePath, err.Error())
+			return err
+		}
+		_, err = f.WriteString(newSchemaContent)
+		if err != nil {
+			logger.Error("write file(%s) error:%s", newSchemaFilePath, err.Error())
+			// 出错时也要关闭句柄,避免文件描述符泄露
+			f.Close()
+			return err
+		}
+		// 循环内显式关闭文件(不用defer,避免句柄在函数返回前堆积泄露)
+		if err = f.Close(); err != nil {
+			logger.Error("close file(%s) error:%s", newSchemaFilePath, err.Error())
+			return err
+		}
+	}
+	return nil
+}
+
+// CreateNewDatabase TODO
+func (c *OpenAreaImportSchemaComp) CreateNewDatabase() (err error) {
+	for _, oneShemaInfo := range c.Params.OpenAreaParam {
+		createDBSql := fmt.Sprintf("create database if not exists `%s` charset %s;",
+			oneShemaInfo.NewDB, c.charset)
+		_, err := c.conn.Exec(createDBSql)
+		if err != nil {
+			logger.Error("create db %s got an error:%s", oneShemaInfo.NewDB, err.Error())
+			return err
+		}
+	}
+	return
+}
+
+// OpenAreaImportSchema TODO
+func (c *OpenAreaImportSchemaComp) OpenAreaImportSchema() (err error) {
+	for _, oneShemaInfo := range c.Params.OpenAreaParam {
+		schemaName := fmt.Sprintf("%s.sql.new", oneShemaInfo.Schema)
+		err = mysqlutil.ExecuteSqlAtLocal{
+			IsForce:          false,
+			Charset:          c.charset,
+			NeedShowWarnings: false,
+			Host:             c.Params.Host,
+			Port:             c.Params.Port,
+			Socket:           c.socket,
+			WorkDir:          c.dumpDir,
+			User:             c.GeneralParam.RuntimeAccountParam.AdminUser,
+			Password:         c.GeneralParam.RuntimeAccountParam.AdminPwd,
+		}.ExcuteSqlByMySQLClientOne(schemaName, oneShemaInfo.NewDB)
+		if err != nil {
+			logger.Error("执行%s文件失败!", schemaName)
+			return err
+		}
+	}
+	return nil
+}
+
+// OpenAreaImportData TODO
+func (c *OpenAreaImportSchemaComp) OpenAreaImportData() (err error) {
+	for _, oneShemaInfo := range c.Params.OpenAreaParam {
+		dataFileName := fmt.Sprintf("%s.sql", oneShemaInfo.Schema)
+
+		err = mysqlutil.ExecuteSqlAtLocal{
+			IsForce:          false,
+			Charset:          c.charset,
+			NeedShowWarnings: false,
+			Host:             c.Params.Host,
+			Port:             
c.Params.Port, + Socket: c.socket, + WorkDir: c.dumpDir, + User: c.GeneralParam.RuntimeAccountParam.AdminUser, + Password: c.GeneralParam.RuntimeAccountParam.AdminPwd, + }.ExcuteSqlByMySQLClientOne(dataFileName, oneShemaInfo.NewDB) + if err != nil { + logger.Error("执行%s文件失败!", dataFileName) + return err + } + } + return nil +} + +// CleanDumpDir TODO +func (c *OpenAreaImportSchemaComp) CleanDumpDir() (err error) { + return +} diff --git a/dbm-ui/backend/db_meta/models/mysql_open_area.py b/dbm-ui/backend/db_meta/models/mysql_open_area.py new file mode 100644 index 0000000000..46c4104326 --- /dev/null +++ b/dbm-ui/backend/db_meta/models/mysql_open_area.py @@ -0,0 +1,57 @@ +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+"""
+from django.db import models
+from django.utils.translation import ugettext_lazy as _
+
+from backend.bk_web.models import AuditedModel
+from backend.constants import DEFAULT_BK_CLOUD_ID
+
+
+class TendbOpenAreaConfig(AuditedModel):
+    bk_biz_id = models.IntegerField(default=0)
+    bk_cloud_id = models.IntegerField(default=DEFAULT_BK_CLOUD_ID, help_text=_("云区域 ID"))
+    config_name = models.CharField(max_length=256, default="")
+    domain_name = models.CharField(max_length=256, default="")
+    source_cluster_id = models.BigIntegerField(default=0)
+    cluster_type = models.CharField(max_length=64, default="")
+
+    class Meta:
+        unique_together = ("bk_biz_id", "domain_name", "source_cluster_id", "cluster_type")
+
+
+class TendbOpenAreaSubConfig(AuditedModel):
+    config = models.ManyToManyField(TendbOpenAreaConfig, blank=True)
+    db_schema_source_db = models.CharField(max_length=256, help_text=_("获取库表结构的源db"), unique=True)
+    # json字段,直接存储待克隆表结构列表,克隆所有表时为空列表
+    db_schema_source_tblist = models.JSONField(help_text=_("获取表结构的源tb列表"), default=list)
+    # json字段,直接存储待克隆表数据列表,克隆所有表时为空列表
+    db_data_source_tblist = models.JSONField(help_text=_("获取数据的源tb列表"), default=list)
+    db_schema_source_db_model = models.CharField(max_length=256, help_text=_("目标db范式"))
+    # json字段,存储权限模板列表
+    priv_init_id = models.JSONField(help_text=_("权限关联模板id"), default=list)
+    creator = models.CharField(max_length=100)
+    create_time = models.DateTimeField(auto_now_add=True, help_text=_("配置创建时间"))
+    updater = models.CharField(max_length=100)
+    update_time = models.DateTimeField(auto_now=True, help_text=_("配置修改时间"))
+
+    class Meta:
+        unique_together = (
+            "db_schema_source_db",
+            "db_schema_source_db_model",
+            "db_schema_source_tblist",
+            "db_data_source_tblist",
+        )
+
+
+class TendbOpenAreaConfigLog(AuditedModel):
+    config = models.ManyToManyField(TendbOpenAreaConfig, blank=True)
+    operator = models.CharField(max_length=100)
+    operate_time = models.DateTimeField(auto_now=True, help_text=_("配置修改时间"))
+    
config_change_log = models.JSONField(help_text=_("开区配置修改记录")) diff --git a/dbm-ui/backend/flow/consts.py b/dbm-ui/backend/flow/consts.py index c373056282..fc089b4298 100644 --- a/dbm-ui/backend/flow/consts.py +++ b/dbm-ui/backend/flow/consts.py @@ -337,6 +337,10 @@ class DBActuatorActionEnum(str, StructuredEnum): TenDBClusterBackendSwitch = EnumField("cluster-backend-switch", _("TenDBCluster集群做后端切换")) TenDBClusterMigrateCutOver = EnumField("cluster-backend-migrate-cutover", _("TenDBCluster集群做后端的成对迁移")) DumpSchema = EnumField("dumpschema", _("为TBinlogDumper实例导出导入源表结构")) + MysqlOpenAreaDumpSchema = EnumField("open_area_dumpschema", _("Mysql开区导出库表结构")) + MysqlOpenAreaImportSchema = EnumField("open_area_importschema", _("Mysql开区导入库表结构")) + MysqlOpenAreaDumpData = EnumField("open_area_dumpdata", _("Mysql开区导出库表数据")) + MysqlOpenAreaImportData = EnumField("open_area_importdata", _("Mysql开区导入库表数据")) class RedisActuatorActionEnum(str, StructuredEnum): diff --git a/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py new file mode 100644 index 0000000000..af7c7a99a0 --- /dev/null +++ b/dbm-ui/backend/flow/engine/bamboo/scene/mysql/mysql_open_area_flow.py @@ -0,0 +1,349 @@ +# -*- coding: utf-8 -*- +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. +Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. 
+""" +import copy +from dataclasses import asdict +from typing import Dict, Optional + +from django.utils.translation import ugettext as _ + +from backend.configuration.constants import DBType +from backend.core.consts import BK_PKG_INSTALL_PATH +from backend.db_meta.enums import ClusterType, InstanceInnerRole, TenDBClusterSpiderRole +from backend.db_meta.exceptions import ClusterNotExistException, DBMetaException +from backend.db_meta.models import Cluster +from backend.flow.engine.bamboo.scene.common.builder import Builder, SubBuilder +from backend.flow.engine.bamboo.scene.common.get_file_list import GetFileList +from backend.flow.plugins.components.collections.mysql.exec_actuator_script import ExecuteDBActuatorScriptComponent +from backend.flow.plugins.components.collections.mysql.trans_flies import TransFileComponent +from backend.flow.utils.mysql.mysql_act_dataclass import DownloadMediaKwargs, ExecActuatorKwargs +from backend.flow.utils.mysql.mysql_act_playload import MysqlActPayload + + +class MysqlOpenAreaFlow(object): + def __init__(self, root_id: str, data: Optional[Dict]): + """ + tenDBHA导出导入库表结构、数据都在主db上进行 + tenDBCluster在中控主节点上导出导入库表结构、在spider节点上导出导入数据 + @param root_id : 任务流程定义的root_id + @param data : 单据传递参数 + """ + self.root_id = root_id + self.data = data + self.data["uid"] = self.data.get("uid") or self.root_id + self.uid = self.data["uid"] + + self.work_dir = f"{BK_PKG_INSTALL_PATH}/mysql_open_area" + self.schema_tar_file_name = f"{self.root_id}_schema.tar.gz" + self.schema_md5sum_file_name = f"{self.root_id}_schema.md5sum" + self.data_tar_file_name = f"{self.root_id}_data.tar.gz" + self.data_md5sum_file_name = f"{self.root_id}_data.md5sum" + + def __get_cluster_info(self, cluster_id: int, bk_biz_id: int, data_flag=False) -> dict: + """ + 获取集群基本信息 source与target共用 + @param cluster_id: + @param bk_biz_id: + @param data_flag: tenDBClusterspider节点上导出和导入数据,因此需要的是spider节点的信息 + @return: + """ + try: + # get查询时,结果只能有一个 查到多个结果会报错 + cluster = 
Cluster.objects.get(id=cluster_id, bk_biz_id=bk_biz_id) + except Cluster.DoesNotExist: + raise ClusterNotExistException(cluster_id=cluster_id, bk_biz_id=bk_biz_id, message=_("集群不存在")) + # 不同集群类型,下发的ip角色不一样 + # tenDBHA下发主db tenDBCluster库表下发中控主节点 数据下发spider节点 + if cluster.cluster_type == ClusterType.TenDBCluster.value: + if data_flag: + ip_port = ( + cluster.proxyinstance_set.filter( + tendbclusterspiderext__spider_role=TenDBClusterSpiderRole.SPIDER_MASTER + ) + .first() + .ip_port + ) + else: + ip_port = cluster.tendbcluster_ctl_primary_address() + elif cluster.cluster_type == ClusterType.TenDBHA.value: + ip_port = cluster.storageinstance_set.get(instance_inner_role=InstanceInnerRole.MASTER).ip_port + else: + raise DBMetaException(message=_("集群实例类型不适用于开区")) + + return { + "cluster_id": cluster.id, + "bk_cloud_id": cluster.bk_cloud_id, + "cluster_type": cluster.cluster_type, + "ip": ip_port.split(":")[0], + "port": int(ip_port.split(":")[1]), + "root_id": self.root_id, + } + + def __get_source_cluster(self, data_flag=False) -> dict: + """ + 获取源实例的库表结构,可指定表 + 区分tenDBHA与tenDBCluster + @return: + """ + source_cluster = self.__get_cluster_info( + cluster_id=self.data["source_cluster"], bk_biz_id=self.data["bk_biz_id"], data_flag=data_flag + ) + source_cluster["is_upload_bkrepo"] = self.__is_upload_bkrepo( + source_cluster=self.data["source_cluster"], target_clusters=self.data["target_clusters"] + ) + # 表列表只有在导出的时候才需要 导入的时候只需要知道新旧库名 + # 多个目标集群开区,但规则用的是一套相同的,因此取第一个获取库表开区规则 + if data_flag: + source_cluster["open_area_param"] = [ + {"schema": exec_obj["source_db"], "tables": exec_obj["data_tblist"]} + for exec_obj in self.data["target_clusters"][0]["execute_objects"] + if len(exec_obj["data_tblist"]) > 0 + ] + else: + source_cluster["open_area_param"] = [ + {"schema": exec_obj["source_db"], "tables": exec_obj["schema_tblist"]} + for exec_obj in self.data["target_clusters"][0]["execute_objects"] + ] + + return source_cluster + + def __get_target_cluster(self, data_flag=False) 
-> list:
+        """
+        获取目标集群相关信息
+        目标集群执行导入库表结构操作,需要知道原库表名称和新库表名称
+        @return:
+        """
+        target_clusters = []
+        for tc in self.data["target_clusters"]:
+            target_cluster = self.__get_cluster_info(
+                cluster_id=tc["target_cluster"], bk_biz_id=self.data["bk_biz_id"], data_flag=data_flag
+            )
+            if data_flag:
+                target_cluster["open_area_param"] = [
+                    {"schema": exec_obj["source_db"], "newdb": exec_obj["target_db"]}
+                    for exec_obj in tc["execute_objects"]
+                    if len(exec_obj["data_tblist"]) > 0
+                ]
+            else:
+                target_cluster["open_area_param"] = [
+                    {"schema": exec_obj["source_db"], "newdb": exec_obj["target_db"]}
+                    for exec_obj in tc["execute_objects"]
+                ]
+
+            target_clusters.append(target_cluster)
+
+        return target_clusters
+
+    def __is_upload_bkrepo(self, source_cluster: int, target_clusters: list) -> bool:
+        """
+        本地开区,不用上传制品库
+        集群维度判断
+        tendbcluster集群 库表结构在中控 数据在spider节点 但属于同一集群
+        有上传必然有下发 是否下发文件也用这个来判断
+        @param source_cluster:
+        @param target_clusters:
+        @return:
+        """
+        for tc in target_clusters:
+            if int(source_cluster) != int(tc["target_cluster"]):
+                # 只要存在跟源集群不一样的 就要上传制品库
+                return True
+
+        return False
+
+    def __get_exec_ip_list(self, source_cluster: dict, target_clusters: list) -> list:
+        """
+        过滤需要下发act的IP
+        @param source_cluster:
+        @param target_clusters:
+        @return:
+        """
+        exec_ip_list = []
+        exec_ip_list.append(source_cluster["ip"])
+        for tc in target_clusters:
+            if tc["ip"] not in exec_ip_list:
+                # 修复:原实现 append() 未传参,运行时会抛 TypeError,且目标ip被遗漏
+                exec_ip_list.append(tc["ip"])
+
+        return exec_ip_list
+
+    def __get_data_flag(self) -> bool:
+        """
+        判断是否需要迁移数据
+        @return:
+        """
+        execute_objects = self.data["target_clusters"][0]["execute_objects"]
+        for exe_obj in execute_objects:
+            if len(exe_obj["data_tblist"]) > 0:
+                return True
+
+        return False
+
+    def mysql_open_area_flow(self):
+        source_cluster_schema = self.__get_source_cluster(data_flag=False)
+        target_clusters_schema = self.__get_target_cluster(data_flag=False)
+        # 提取要下发act的机器ip(过滤重复)
+        exec_ip_list = self.__get_exec_ip_list(source_cluster_schema, target_clusters_schema)
+
+ pipeline = Builder(root_id=self.root_id, data=self.data) + + pipeline.add_act( + act_name=_("下发db-actuator介质"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=source_cluster_schema["bk_cloud_id"], + exec_ip=exec_ip_list, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + + pipeline.add_act( + act_name=_("从源实例获取开区所需库表结构"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + bk_cloud_id=source_cluster_schema["bk_cloud_id"], + cluster_type=source_cluster_schema["cluster_type"], + cluster=source_cluster_schema, + exec_ip=source_cluster_schema["ip"], + get_mysql_payload_func=MysqlActPayload.get_open_area_dump_schema_payload.__name__, + ) + ), + ) + + # 本地开区,没有上传制品库,也不用下发 + if source_cluster_schema["is_upload_bkrepo"]: + # 目标集群下发库表文件,源集群不用下发 + exec_ip_list.remove(source_cluster_schema["ip"]) + pipeline.add_act( + act_name=_("下发开区库表文件"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=0, + exec_ip=exec_ip_list, + file_target_path=self.work_dir, + file_list=GetFileList(db_type=DBType.MySQL).mysql_import_sqlfile( + path="mysql/sqlfile", filelist=[self.schema_tar_file_name, self.schema_md5sum_file_name] + ), + ) + ), + ) + + sub_pipelines = [] + for target_cluster in target_clusters_schema: + sub_pipeline = SubBuilder(root_id=self.root_id, data=self.data) + sub_pipeline.add_act( + act_name=_("向目标实例导入库表结构"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + bk_cloud_id=target_cluster["bk_cloud_id"], + cluster_type=target_cluster["cluster_type"], + cluster=target_cluster, + exec_ip=target_cluster["ip"], + get_mysql_payload_func=MysqlActPayload.get_open_area_import_schema_payload.__name__, + ) + ), + ) + + sub_pipelines.append(sub_pipeline.build_sub_process(sub_name=_("目标集群开区导入表结构流程"))) + 
pipeline.add_parallel_sub_pipeline(sub_flow_list=sub_pipelines) + + # 判断是否需要进行数据的迁移 + data_flag = self.__get_data_flag() + if data_flag: + pipeline.add_sub_pipeline(sub_flow=self.open_area_data_flow()) + + pipeline.run_pipeline() + + def open_area_data_flow(self): + """ + 用于构建导入导出数据的子流程 + 库表信息和导入导出表结构不一样 主要是指定表不一样 + 对于数据来说,空列表表示不导出数据,只有指定的时候才进行操作 + 另外,对于tenDBCluster集群来说,导出导入数据在spider节点上进行,需要单独再下发一次act + @return: + """ + # 获取导入导出数据集群参数 + source_cluster_data = self.__get_source_cluster(data_flag=True) + target_clusters_data = self.__get_target_cluster(data_flag=True) + # 获取之后操作的所有ip,过滤重复值 + exec_ip_list = self.__get_exec_ip_list(source_cluster_data, target_clusters_data) + + sub_pipeline = SubBuilder(root_id=self.root_id, data=copy.deepcopy(self.data)) + + # tenDBCluster集群需要给spider下发act + if source_cluster_data["cluster_type"] == ClusterType.TenDBCluster.value: + sub_pipeline.add_act( + act_name=_("下发db-actuator介质"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=source_cluster_data["bk_cloud_id"], + exec_ip=exec_ip_list, + file_list=GetFileList(db_type=DBType.MySQL).get_db_actuator_package(), + ) + ), + ) + sub_pipeline.add_act( + act_name=_("从源实例获取开区所需库表数据"), + act_component_code=ExecuteDBActuatorScriptComponent.code, + kwargs=asdict( + ExecActuatorKwargs( + bk_cloud_id=source_cluster_data["bk_cloud_id"], + cluster_type=source_cluster_data["cluster_type"], + cluster=source_cluster_data, + exec_ip=source_cluster_data["ip"], + get_mysql_payload_func=MysqlActPayload.get_open_area_dump_data_payload.__name__, + ) + ), + ) + + # 本地开区,没有上传制品库,也不用下发 + if source_cluster_data["is_upload_bkrepo"]: + # 目标集群下发库表文件,源集群不用下发 + exec_ip_list.remove(source_cluster_data["ip"]) + sub_pipeline.add_act( + act_name=_("下发开区库表数据文件"), + act_component_code=TransFileComponent.code, + kwargs=asdict( + DownloadMediaKwargs( + bk_cloud_id=0, + exec_ip=exec_ip_list, + file_target_path=self.work_dir, + 
file_list=GetFileList(db_type=DBType.MySQL).mysql_import_sqlfile( + path="mysql/sqlfile", filelist=[self.data_tar_file_name, self.data_md5sum_file_name] + ), + ) + ), + ) + + acts_list = [] + for target_cluster in target_clusters_data: + acts_list.append( + { + "act_name": _("向目标实例导入库表数据"), + "act_component_code": ExecuteDBActuatorScriptComponent.code, + "kwargs": asdict( + ExecActuatorKwargs( + bk_cloud_id=target_cluster["bk_cloud_id"], + cluster_type=target_cluster["cluster_type"], + cluster=target_cluster, + exec_ip=target_cluster["ip"], + get_mysql_payload_func=MysqlActPayload.get_open_area_import_data_payload.__name__, + ) + ), + } + ) + sub_pipeline.add_parallel_acts(acts_list=acts_list) + + return sub_pipeline.build_sub_process(sub_name=_("开区数据迁移流程")) diff --git a/dbm-ui/backend/flow/engine/controller/mysql.py b/dbm-ui/backend/flow/engine/controller/mysql.py index 18d9d77902..14e546927f 100644 --- a/dbm-ui/backend/flow/engine/controller/mysql.py +++ b/dbm-ui/backend/flow/engine/controller/mysql.py @@ -27,6 +27,7 @@ from backend.flow.engine.bamboo.scene.mysql.mysql_master_fail_over import MySQLMasterFailOverFlow from backend.flow.engine.bamboo.scene.mysql.mysql_master_slave_switch import MySQLMasterSlaveSwitchFlow from backend.flow.engine.bamboo.scene.mysql.mysql_migrate_cluster_flow import MySQLMigrateClusterFlow +from backend.flow.engine.bamboo.scene.mysql.mysql_open_area_flow import MysqlOpenAreaFlow from backend.flow.engine.bamboo.scene.mysql.mysql_partition import MysqlPartitionFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_add import MySQLProxyClusterAddFlow from backend.flow.engine.bamboo.scene.mysql.mysql_proxy_cluster_switch import MySQLProxyClusterSwitchFlow @@ -501,3 +502,7 @@ def mysql_single_rename_database_scene(self): def mysql_ha_standardize_scene(self): flow = MySQLHAStandardizeFlow(root_id=self.root_id, data=self.ticket_data) flow.standardize() + + def mysql_open_area_scene(self): + flow = 
MysqlOpenAreaFlow(root_id=self.root_id, data=self.ticket_data) + flow.mysql_open_area_flow() diff --git a/dbm-ui/backend/flow/urls.py b/dbm-ui/backend/flow/urls.py index e1268983c5..577103a64b 100644 --- a/dbm-ui/backend/flow/urls.py +++ b/dbm-ui/backend/flow/urls.py @@ -63,6 +63,7 @@ from backend.flow.views.mysql_ha_switch import MySQLHASwitchSceneApiView from backend.flow.views.mysql_ha_truncate_data import MySQLHATruncateDataView from backend.flow.views.mysql_migrate_cluster import MigrateMysqlClusterSceneApiView +from backend.flow.views.mysql_open_area import MysqlOpenAreaSceneApiView from backend.flow.views.mysql_partition import MysqlPartitionSceneApiView from backend.flow.views.mysql_proxy_add import AddMySQLProxySceneApiView from backend.flow.views.mysql_proxy_switch import SwitchMySQLProxySceneApiView @@ -316,4 +317,5 @@ url("^scene/reduce_tbinlogumper$", ReduceTBinlogDumperSceneApiView.as_view()), url("^scene/switch_tbinlogumper$", SwitchTBinlogDumperSceneApiView.as_view()), url("^scene/tendbha_standardize$", TenDBHAStandardizeView.as_view()), + url("^scene/mysql_open_area$", MysqlOpenAreaSceneApiView.as_view()), ] diff --git a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py index a6e4b3c39e..1d98d31630 100644 --- a/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py +++ b/dbm-ui/backend/flow/utils/mysql/mysql_act_playload.py @@ -1923,3 +1923,140 @@ def get_install_tmp_db_backup_payload(self, **kwargs): }, }, } + + def get_open_area_dump_schema_payload(self, **kwargs): + """ + 开区导出表结构 + @param kwargs: + @return: + """ + fileserver = {} + rsa = RSAHandler.get_or_generate_rsa_in_db(RSAConfigType.PROXYPASS.value) + db_cloud_token = RSAHandler.encrypt_password( + rsa.rsa_public_key.content, f"{self.bk_cloud_id}_dbactuator_token" + ) + + nginx_ip = DBCloudProxy.objects.filter(bk_cloud_id=self.bk_cloud_id).last().internal_address + bkrepo_url = f"http://{nginx_ip}/apis/proxypass" if 
self.bk_cloud_id else settings.BKREPO_ENDPOINT_URL + + if self.cluster["is_upload_bkrepo"]: + fileserver.update( + { + "url": bkrepo_url, + "bucket": settings.BKREPO_BUCKET, + "username": settings.BKREPO_USERNAME, + "password": settings.BKREPO_PASSWORD, + "project": settings.BKREPO_PROJECT, + "upload_path": BKREPO_SQLFILE_PATH, + } + ) + + return { + "db_type": DBActuatorTypeEnum.MySQL.value, # spider集群也用mysql类型 + "action": DBActuatorActionEnum.MysqlOpenAreaDumpSchema.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "db_cloud_token": db_cloud_token, + "dump_dir_name": f"{self.cluster['root_id']}_schema", + "fileserver": fileserver, + "open_area_param": self.cluster["open_area_param"], + }, + }, + } + + def get_open_area_import_schema_payload(self, **kwargs): + """ + 开区导入表结构 + @param kwargs: + @return: + """ + return { + "db_type": DBActuatorTypeEnum.MySQL.value, # spider集群也用mysql类型 + "action": DBActuatorActionEnum.MysqlOpenAreaImportSchema.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "dump_dir_name": f"{self.cluster['root_id']}_schema", + "open_area_param": self.cluster["open_area_param"], + }, + }, + } + + def get_open_area_dump_data_payload(self, **kwargs): + """ + 开区导出表数据 + @param kwargs: + @return: + """ + fileserver = {} + rsa = RSAHandler.get_or_generate_rsa_in_db(RSAConfigType.PROXYPASS.value) + db_cloud_token = RSAHandler.encrypt_password( + rsa.rsa_public_key.content, f"{self.bk_cloud_id}_dbactuator_token" + ) + + nginx_ip = DBCloudProxy.objects.filter(bk_cloud_id=self.bk_cloud_id).last().internal_address + bkrepo_url = f"http://{nginx_ip}/apis/proxypass" if self.bk_cloud_id else 
settings.BKREPO_ENDPOINT_URL + + if self.cluster["is_upload_bkrepo"]: + fileserver.update( + { + "url": bkrepo_url, + "bucket": settings.BKREPO_BUCKET, + "username": settings.BKREPO_USERNAME, + "password": settings.BKREPO_PASSWORD, + "project": settings.BKREPO_PROJECT, + "upload_path": BKREPO_SQLFILE_PATH, + } + ) + return { + "db_type": DBActuatorTypeEnum.MySQL.value, # spider集群也用mysql类型 + "action": DBActuatorActionEnum.MysqlOpenAreaDumpData.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "db_cloud_token": db_cloud_token, + "dump_dir_name": f"{self.cluster['root_id']}_data", + "fileserver": fileserver, + "open_area_param": self.cluster["open_area_param"], + }, + }, + } + + def get_open_area_import_data_payload(self, **kwargs): + """ + 开区导入表数据 + @param kwargs: + @return: + """ + return { + "db_type": DBActuatorTypeEnum.MySQL.value, # spider集群也用mysql类型 + "action": DBActuatorActionEnum.MysqlOpenAreaImportData.value, + "payload": { + "general": {"runtime_account": self.account}, + "extend": { + "host": kwargs["ip"], + "port": self.cluster["port"], + "charset": "default", + "root_id": self.cluster["root_id"], + "bk_cloud_id": self.bk_cloud_id, + "dump_dir_name": f"{self.cluster['root_id']}_data", + "open_area_param": self.cluster["open_area_param"], + }, + }, + } diff --git a/dbm-ui/backend/flow/views/mysql_open_area.py b/dbm-ui/backend/flow/views/mysql_open_area.py new file mode 100644 index 0000000000..ed94aed17d --- /dev/null +++ b/dbm-ui/backend/flow/views/mysql_open_area.py @@ -0,0 +1,31 @@ +""" +TencentBlueKing is pleased to support the open source community by making 蓝鲸智云-DB管理系统(BlueKing-BK-DBM) available. +Copyright (C) 2017-2023 THL A29 Limited, a Tencent company. All rights reserved. 
+Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. +You may obtain a copy of the License at https://opensource.org/licenses/MIT +Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on +an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the +specific language governing permissions and limitations under the License. +""" +import logging +import uuid + +from rest_framework.response import Response + +from backend.flow.engine.controller.mysql import MySQLController +from backend.flow.views.base import FlowTestView + +logger = logging.getLogger("root") + + +class MysqlOpenAreaSceneApiView(FlowTestView): + """ + api: /apis/v1/flow/scene/mysql_open_area + params: + """ + + def post(self, request): + root_id = uuid.uuid1().hex + test = MySQLController(root_id=root_id, ticket_data=request.data) + test.mysql_open_area_scene() + return Response({"root_id": root_id})